Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 21 Oct 2010 21:08:08 +0000 (14:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 21 Oct 2010 21:08:08 +0000 (14:08 -0700)
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ntp: Clamp PLL update interval

1438 files changed:
CREDITS
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/kernel-api.tmpl
Documentation/DocBook/kernel-locking.tmpl
Documentation/RCU/checklist.txt
Documentation/RCU/stallwarn.txt
Documentation/RCU/trace.txt
Documentation/block/cfq-iosched.txt [new file with mode: 0644]
Documentation/cgroups/blkio-controller.txt
Documentation/cputopology.txt
Documentation/feature-removal-schedule.txt
Documentation/gpio.txt
Documentation/hwmon/sysfs-interface
Documentation/kernel-doc-nano-HOWTO.txt
Documentation/kernel-parameters.txt
Documentation/kprobes.txt
Documentation/mutex-design.txt
Documentation/networking/e1000.txt
Documentation/networking/e1000e.txt [new file with mode: 0644]
Documentation/networking/ixgbevf.txt [changed mode: 0755->0644]
Documentation/power/regulator/overview.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/vm/page-types.c
Documentation/workqueue.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/include/asm/cacheflush.h
arch/alpha/include/asm/perf_event.h
arch/alpha/include/asm/unistd.h
arch/alpha/kernel/entry.S
arch/alpha/kernel/err_ev6.c
arch/alpha/kernel/err_marvel.c
arch/alpha/kernel/err_titan.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/pci-sysfs.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/signal.c
arch/alpha/kernel/srm_env.c
arch/alpha/kernel/systbls.S
arch/alpha/kernel/time.c
arch/alpha/kernel/traps.c
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/clock.c
arch/arm/mach-at91/include/mach/system.h
arch/arm/mach-bcmring/dma.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-dove/include/mach/io.h
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-ep93xx/dma-m2p.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/mach-cpuimx27.c
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/include/mach/hardware.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-kirkwood/pcie.c
arch/arm/mach-mmp/include/mach/system.h
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/hardware.h
arch/arm/mach-pxa/include/mach/io.h
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/mach-real6410.c
arch/arm/mach-s5p6440/cpu.c
arch/arm/mach-s5p6442/cpu.c
arch/arm/mach-s5pc100/cpu.c
arch/arm/mach-s5pv210/clock.c
arch/arm/mach-s5pv210/cpu.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mach-u300/include/mach/gpio.h
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/Kconfig
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/ioremap.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/oprofile/Makefile
arch/arm/oprofile/common.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-nomadik/timer.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/iommu.c
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/sram.c
arch/arm/plat-pxa/pwm.c
arch/arm/plat-s5p/dev-fimc0.c
arch/arm/plat-s5p/dev-fimc1.c
arch/arm/plat-s5p/dev-fimc2.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/clock.c
arch/arm/plat-samsung/gpio-config.c
arch/arm/plat-samsung/include/plat/gpio-cfg.h
arch/arm/tools/mach-types
arch/avr32/kernel/module.c
arch/frv/Kconfig
arch/frv/kernel/signal.c
arch/frv/lib/Makefile
arch/h8300/kernel/module.c
arch/ia64/include/asm/compat.h
arch/ia64/include/asm/hardirq.h
arch/ia64/include/asm/system.h
arch/ia64/kernel/fsys.S
arch/m32r/include/asm/elf.h
arch/m32r/include/asm/signal.h
arch/m32r/include/asm/unistd.h
arch/m32r/kernel/.gitignore [new file with mode: 0644]
arch/m32r/kernel/entry.S
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry.S
arch/m68k/mac/macboing.c
arch/m68knommu/kernel/syscalltable.S
arch/mips/Kbuild
arch/mips/Kconfig
arch/mips/alchemy/common/prom.c
arch/mips/boot/compressed/Makefile
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/cpu.c
arch/mips/cavium-octeon/executive/Makefile
arch/mips/dec/Platform
arch/mips/include/asm/atomic.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/cop2.h
arch/mips/include/asm/fcntl.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/mach-tx49xx/kmalloc.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/page.h
arch/mips/include/asm/siginfo.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/unistd.h
arch/mips/jz4740/Platform
arch/mips/kernel/branch.c
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/kspd.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mips-mt-fpaff.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/signal.c
arch/mips/kernel/signal_n32.c
arch/mips/kernel/unaligned.c
arch/mips/mm/dma-default.c
arch/mips/mm/sc-rm7k.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-rc32434.c
arch/mips/pnx8550/common/reset.c
arch/mips/pnx8550/common/setup.c
arch/mn10300/Kconfig
arch/mn10300/Kconfig.debug
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/signal.h
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/module.c
arch/mn10300/kernel/signal.c
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-disabled.c [moved from arch/frv/lib/perf_event.c with 53% similarity]
arch/mn10300/mm/cache.c
arch/parisc/Kconfig
arch/parisc/include/asm/compat.h
arch/parisc/include/asm/perf_event.h
arch/parisc/kernel/module.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/compat.h
arch/powerpc/include/asm/fsldma.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/system.h
arch/powerpc/kernel/module.c
arch/powerpc/kernel/perf_callchain.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/time.c
arch/powerpc/platforms/512x/clock.c
arch/powerpc/platforms/52xx/efika.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/s390/Kconfig
arch/s390/include/asm/compat.h
arch/s390/include/asm/hardirq.h
arch/s390/include/asm/perf_event.h
arch/s390/include/asm/system.h
arch/s390/include/asm/topology.h
arch/s390/kernel/module.c
arch/s390/kernel/topology.c
arch/sh/Kconfig
arch/sh/include/asm/perf_event.h
arch/sh/kernel/module.c
arch/sh/kernel/perf_callchain.c
arch/sh/kernel/perf_event.c
arch/sh/oprofile/Makefile
arch/sh/oprofile/common.c
arch/sh/oprofile/op_impl.h [deleted file]
arch/sparc/Kconfig
arch/sparc/include/asm/compat.h
arch/sparc/include/asm/jump_label.h [new file with mode: 0644]
arch/sparc/include/asm/perf_event.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/jump_label.c [new file with mode: 0644]
arch/sparc/kernel/module.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/windows.c
arch/tile/include/arch/chip_tile64.h
arch/tile/include/arch/chip_tilepro.h
arch/tile/include/asm/compat.h
arch/tile/include/asm/io.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/ptrace.h
arch/tile/include/asm/sigcontext.h
arch/tile/include/asm/signal.h
arch/tile/include/asm/syscalls.h
arch/tile/kernel/intvec_32.S
arch/tile/kernel/process.c
arch/tile/kernel/signal.c
arch/tile/kernel/stack.c
arch/um/drivers/hostaudio_kern.c
arch/um/drivers/net_kern.c
arch/um/drivers/ubd_kern.c
arch/um/kernel/exec.c
arch/um/kernel/internal.h
arch/um/kernel/syscall.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/early_serial_console.c
arch/x86/ia32/ia32_aout.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/alternative.h
arch/x86/include/asm/amd_iommu.h
arch/x86/include/asm/amd_iommu_proto.h
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/amd_nb.h [moved from arch/x86/include/asm/k8.h with 61% similarity]
arch/x86/include/asm/apb_timer.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/dwarf2.h
arch/x86/include/asm/entry_arch.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/gart.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hw_breakpoint.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/i387.h
arch/x86/include/asm/io.h
arch/x86/include/asm/iomap.h
arch/x86/include/asm/irq_remapping.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/jump_label.h [new file with mode: 0644]
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mrst.h
arch/x86/include/asm/mwait.h [new file with mode: 0644]
arch/x86/include/asm/olpc_ofw.h
arch/x86/include/asm/page_types.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/perf_event_p4.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/vmi.h [deleted file]
arch/x86/include/asm/vmi_time.h [deleted file]
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/amd_nb.c [moved from arch/x86/kernel/k8.c with 66% similarity]
arch/x86/kernel/apb_timer.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/early_printk_mrst.c [new file with mode: 0644]
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c
arch/x86/kernel/hpet.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/i387.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_work.c [new file with mode: 0644]
arch/x86/kernel/irqinit.c
arch/x86/kernel/jump_label.c [new file with mode: 0644]
arch/x86/kernel/kprobes.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/module.c
arch/x86/kernel/olpc-xo1.c [new file with mode: 0644]
arch/x86/kernel/olpc.c
arch/x86/kernel/olpc_ofw.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/pmtimer_64.c [deleted file]
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/sfi.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/sys_i386_32.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vmi_32.c [deleted file]
arch/x86/kernel/vmiclock_32.c [deleted file]
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/irq.h
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/memcpy_32.c
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/mm/k8topology_64.c
arch/x86/mm/kmemcheck/kmemcheck.c
arch/x86/mm/kmemcheck/opcode.c
arch/x86/mm/numa_64.c
arch/x86/mm/pgtable.c
arch/x86/mm/srat_64.c
arch/x86/mm/tlb.c
arch/x86/oprofile/backtrace.c
arch/x86/oprofile/nmi_int.c
arch/x86/pci/olpc.c
arch/x86/xen/mmu.c
arch/x86/xen/time.c
block/blk-cgroup.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/blk-sysfs.c
block/blk.h
block/bsg.c
block/cfq-iosched.c
block/elevator.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst-dbg.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/atomicio.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/fan.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_platform.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-sff.c
drivers/ata/pata_artop.c
drivers/ata/pata_via.c
drivers/ata/sata_mv.c
drivers/atm/iphase.c
drivers/atm/iphase.h
drivers/atm/solos-pci.c
drivers/base/power/main.c
drivers/base/topology.c
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/rbd.c [new file with mode: 0644]
drivers/block/rbd_types.h [new file with mode: 0644]
drivers/block/virtio_blk.c
drivers/char/agp/Kconfig
drivers/char/agp/amd64-agp.c
drivers/char/agp/generic.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/char/tpm/tpm.c
drivers/char/virtio_console.c
drivers/char/vt_ioctl.c
drivers/cpuidle/governors/menu.c
drivers/dca/dca-core.c
drivers/dma/ioat/dma_v2.c
drivers/dma/mv_xor.c
drivers/dma/shdma.c
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/amd64_edac_dbg.c
drivers/edac/edac_device_sysfs.c
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_mce_amd.c [deleted file]
drivers/edac/edac_module.c
drivers/edac/edac_module.h
drivers/edac/edac_pci_sysfs.c
drivers/edac/edac_stub.c
drivers/edac/i7core_edac.c
drivers/edac/mce_amd.c [new file with mode: 0644]
drivers/edac/mce_amd.h [moved from drivers/edac/edac_mce_amd.h with 65% similarity]
drivers/edac/mce_amd_inj.c [new file with mode: 0644]
drivers/firewire/ohci.c
drivers/firewire/ohci.h
drivers/firmware/Kconfig
drivers/gpio/sx150x.c
drivers/gpu/drm/drm_buffer.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_blit_shaders.h
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/hid/hid-cando.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-mosart.c
drivers/hid/hid-topseed.c
drivers/hid/hidraw.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hid/usbhid/usbhid.h
drivers/hwmon/Kconfig
drivers/hwmon/adm1031.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc1403.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/hp_accel.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/lis3lv02d_i2c.c
drivers/hwmon/lis3lv02d_spi.c
drivers/hwmon/lm95241.c
drivers/hwmon/pkgtemp.c
drivers/hwmon/w83627ehf.c
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-ibm_iic.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-pca-isa.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/i2c-core.c
drivers/ide/ide-probe.c
drivers/idle/intel_idle.c [changed mode: 0755->0644]
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/misc/hp_sdc_rtc.c
drivers/input/misc/uinput.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/hil_mlc.c
drivers/input/serio/hp_sdc.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/isdn/sc/interrupt.c
drivers/leds/leds-ns2.c
drivers/macintosh/adb.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/raid1.c
drivers/media/IR/ir-keytable.c
drivers/media/IR/ir-lirc-codec.c
drivers/media/IR/ir-raw-event.c
drivers/media/IR/ir-sysfs.c
drivers/media/IR/keymaps/rc-rc6-mce.c
drivers/media/IR/mceusb.c
drivers/media/dvb/dvb-usb/dib0700_core.c
drivers/media/dvb/dvb-usb/dib0700_devices.c
drivers/media/dvb/dvb-usb/opera1.c
drivers/media/dvb/frontends/dib7000p.c
drivers/media/dvb/frontends/dib7000p.h
drivers/media/dvb/siano/smscoreapi.c
drivers/media/radio/si470x/radio-si470x-i2c.c
drivers/media/video/cx231xx/Makefile
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/cx88/Kconfig
drivers/media/video/gspca/gspca.c
drivers/media/video/gspca/sn9c20x.c
drivers/media/video/ivtv/ivtvfb.c
drivers/media/video/mem2mem_testdev.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9v022.c
drivers/media/video/mx2_camera.c
drivers/media/video/pvrusb2/pvrusb2-ctrl.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/saa7134/saa7134-cards.c
drivers/media/video/saa7164/saa7164-buffer.c
drivers/media/video/uvc/uvc_driver.c
drivers/media/video/uvc/uvcvideo.h
drivers/media/video/v4l2-compat-ioctl32.c
drivers/media/video/videobuf-dma-contig.c
drivers/media/video/videobuf-dma-sg.c
drivers/mfd/max8925-core.c
drivers/mfd/wm831x-irq.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/bh1780gli.c
drivers/misc/vmw_balloon.c [moved from drivers/misc/vmware_balloon.c with 100% similarity]
drivers/mmc/core/core.c
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/onenand/samsung.c
drivers/net/3c527.c
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/atlx/atl1.c
drivers/net/b44.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/e1000e/hw.h
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/ehea/ehea_main.c
drivers/net/ehea/ehea_qmr.h
drivers/net/eql.c
drivers/net/fec.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/ibm_newemac/core.c
drivers/net/irda/sir_dev.c
drivers/net/ks8851.c
drivers/net/ll_temac_main.c
drivers/net/ll_temac_mdio.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/niu.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/phy/mdio_bus.c
drivers/net/ppp_async.c
drivers/net/ppp_generic.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/r8169.c
drivers/net/rionet.c
drivers/net/sgiseeq.c
drivers/net/skge.c
drivers/net/smsc911x.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tulip/de2104x.c
drivers/net/usb/hso.c
drivers/net/usb/ipheth.c
drivers/net/via-velocity.c
drivers/net/wan/cosa.c
drivers/net/wimax/i2400m/rx.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/oprofile/oprof.c
drivers/oprofile/oprof.h
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofile_perf.c [new file with mode: 0644]
drivers/oprofile/oprofilefs.c
drivers/parport/share.c
drivers/pci/intel-iommu.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pd6729.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/apm_power.c
drivers/power/intel_mid_battery.c
drivers/regulator/88pm8607.c
drivers/regulator/ab3100.c
drivers/regulator/ab8500.c
drivers/regulator/ad5398.c
drivers/regulator/core.c
drivers/regulator/isl6271a-regulator.c
drivers/regulator/max1586.c
drivers/regulator/max8649.c
drivers/regulator/max8998.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/rtc/rtc-ab3100.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-s3c.c
drivers/s390/char/tape_block.c
drivers/s390/net/ctcm_main.c
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/constants.c
drivers/scsi/hpsa.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/serial/amba-pl010.c
drivers/serial/ioc3_serial.c
drivers/serial/mfd.c
drivers/serial/mpc52xx_uart.c
drivers/serial/mrst_max3110.c
drivers/serial/serial_cs.c
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/spi.c
drivers/spi/spi_gpio.c
drivers/spi/spi_mpc8xxx.c
drivers/spi/spi_s3c64xx.c
drivers/staging/batman-adv/hard-interface.c
drivers/staging/batman-adv/send.c
drivers/staging/ti-st/st.h
drivers/staging/ti-st/st_core.c
drivers/staging/ti-st/st_core.h
drivers/staging/ti-st/st_kim.c
drivers/staging/tm6000/Kconfig
drivers/staging/tm6000/tm6000-input.c
drivers/staging/vt6655/wpactl.c
drivers/usb/core/Kconfig
drivers/usb/core/file.c
drivers/usb/core/message.c
drivers/usb/host/ehci-pci.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_debugfs.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/usb/otg/twl4030-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/video/console/fbcon.c
drivers/video/efifb.c
drivers/video/pxa168fb.c
drivers/video/sis/sis_main.c
drivers/video/via/ioctl.c
drivers/watchdog/Kconfig
drivers/watchdog/sb_wdog.c
drivers/watchdog/ts72xx_wdt.c
drivers/xen/xenbus/xenbus_probe.c
fs/9p/vfs_dir.c
fs/9p/vfs_inode.c
fs/9p/vfs_super.c
fs/affs/super.c
fs/aio.c
fs/binfmt_aout.c
fs/binfmt_misc.c
fs/bio-integrity.c
fs/ceph/Kconfig
fs/ceph/Makefile
fs/ceph/README [deleted file]
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/ceph_frag.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/ioctl.h
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/pagelist.c [deleted file]
fs/ceph/snap.c
fs/ceph/strings.c [moved from fs/ceph/ceph_strings.c with 59% similarity]
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/char_dev.c
fs/cifs/Kconfig
fs/cifs/asn1.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/transport.c
fs/coda/psdev.c
fs/compat.c
fs/direct-io.c
fs/exec.c
fs/exofs/inode.c
fs/fcntl.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/Kconfig
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/bmap.h
fs/gfs2/dentry.c
fs/gfs2/dir.c
fs/gfs2/dir.h
fs/gfs2/export.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/lock_dlm.c
fs/gfs2/log.c
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/ops_inode.c
fs/gfs2/quota.c
fs/gfs2/recovery.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/gfs2/sys.c
fs/gfs2/trace_gfs2.h
fs/gfs2/trans.h
fs/gfs2/xattr.c
fs/hfs/bfind.c
fs/hfs/btree.c
fs/hfs/btree.h
fs/hfsplus/bfind.c
fs/hfsplus/bitmap.c
fs/hfsplus/brec.c
fs/hfsplus/btree.c
fs/hfsplus/catalog.c
fs/hfsplus/dir.c
fs/hfsplus/extents.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/hfsplus_raw.h
fs/hfsplus/inode.c
fs/hfsplus/ioctl.c
fs/hfsplus/options.c
fs/hfsplus/part_tbl.c
fs/hfsplus/super.c
fs/hfsplus/unicode.c
fs/hfsplus/wrapper.c
fs/minix/namei.c
fs/nfs/Kconfig
fs/nfs/client.c
fs/nfs/file.c
fs/nfs/super.c
fs/nfsd/Kconfig
fs/nfsd/nfs4state.c
fs/nfsd/nfsfh.h
fs/notify/Kconfig
fs/ocfs2/acl.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmglue.h
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/ocfs2_ioctl.h
fs/ocfs2/refcounttree.c
fs/ocfs2/reservations.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/ocfs2/symlink.c
fs/ocfs2/xattr.c
fs/proc/base.c
fs/proc/page.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/reiserfs/ioctl.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/acpi/acpixf.h
include/asm-generic/gpio.h
include/asm-generic/hardirq.h
include/asm-generic/pgtable.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_pciids.h
include/drm/ttm/ttm_bo_api.h
include/linux/Kbuild
include/linux/acpi_pmtmr.h
include/linux/ceph/auth.h [moved from fs/ceph/auth.h with 97% similarity]
include/linux/ceph/buffer.h [moved from fs/ceph/buffer.h with 100% similarity]
include/linux/ceph/ceph_debug.h [moved from fs/ceph/ceph_debug.h with 86% similarity]
include/linux/ceph/ceph_frag.h [moved from fs/ceph/ceph_frag.h with 100% similarity]
include/linux/ceph/ceph_fs.h [moved from fs/ceph/ceph_fs.h with 99% similarity]
include/linux/ceph/ceph_hash.h [moved from fs/ceph/ceph_hash.h with 100% similarity]
include/linux/ceph/debugfs.h [new file with mode: 0644]
include/linux/ceph/decode.h [moved from fs/ceph/decode.h with 96% similarity]
include/linux/ceph/libceph.h [new file with mode: 0644]
include/linux/ceph/mdsmap.h [moved from fs/ceph/mdsmap.h with 100% similarity]
include/linux/ceph/messenger.h [moved from fs/ceph/messenger.h with 95% similarity]
include/linux/ceph/mon_client.h [moved from fs/ceph/mon_client.h with 99% similarity]
include/linux/ceph/msgpool.h [moved from fs/ceph/msgpool.h with 100% similarity]
include/linux/ceph/msgr.h [moved from fs/ceph/msgr.h with 100% similarity]
include/linux/ceph/osd_client.h [moved from fs/ceph/osd_client.h with 76% similarity]
include/linux/ceph/osdmap.h [moved from fs/ceph/osdmap.h with 97% similarity]
include/linux/ceph/pagelist.h [moved from fs/ceph/pagelist.h with 62% similarity]
include/linux/ceph/rados.h [moved from fs/ceph/rados.h with 100% similarity]
include/linux/ceph/types.h [moved from fs/ceph/types.h with 100% similarity]
include/linux/cgroup.h
include/linux/compat.h
include/linux/compiler.h
include/linux/coredump.h
include/linux/cpuidle.h
include/linux/cred.h
include/linux/crush/crush.h [moved from fs/ceph/crush/crush.h with 100% similarity]
include/linux/crush/hash.h [moved from fs/ceph/crush/hash.h with 100% similarity]
include/linux/crush/mapper.h [moved from fs/ceph/crush/mapper.h with 100% similarity]
include/linux/debug_locks.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/dynamic_debug.h
include/linux/edac.h
include/linux/elevator.h
include/linux/fdtable.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/genhd.h
include/linux/gpio.h
include/linux/hardirq.h
include/linux/i2c/sx150x.h
include/linux/idr.h
include/linux/init_task.h
include/linux/input.h
include/linux/interrupt.h
include/linux/io-mapping.h
include/linux/iocontext.h
include/linux/irq_work.h [new file with mode: 0644]
include/linux/jump_label.h [new file with mode: 0644]
include/linux/jump_label_ref.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/key.h
include/linux/kfifo.h
include/linux/ksm.h
include/linux/kvm_host.h
include/linux/lglock.h
include/linux/libata.h
include/linux/lockdep.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/module.h
include/linux/mutex.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter/xt_SECMARK.h
include/linux/netlink.h
include/linux/netpoll.h
include/linux/nfs_fs.h
include/linux/notifier.h
include/linux/oprofile.h
include/linux/pci_ids.h
include/linux/percpu-defs.h
include/linux/percpu.h
include/linux/perf_event.h
include/linux/quotaops.h
include/linux/radix-tree.h
include/linux/rculist.h
include/linux/rculist_nulls.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/sched.h
include/linux/security.h
include/linux/selinux.h
include/linux/semaphore.h
include/linux/socket.h
include/linux/spi/dw_spi.h
include/linux/srcu.h
include/linux/stop_machine.h
include/linux/sunrpc/auth_gss.h
include/linux/sunrpc/clnt.h
include/linux/swap.h
include/linux/thread_info.h
include/linux/topology.h
include/linux/tracepoint.h
include/linux/types.h
include/linux/vmstat.h
include/linux/wait.h
include/linux/workqueue.h
include/media/videobuf-dma-sg.h
include/net/addrconf.h
include/net/bluetooth/bluetooth.h
include/net/cls_cgroup.h
include/net/dst.h
include/net/ip_vs.h
include/net/netfilter/nf_conntrack.h
include/net/route.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/xfrm.h
include/trace/events/irq.h
include/trace/events/napi.h
include/trace/events/net.h [new file with mode: 0644]
include/trace/events/power.h
include/trace/events/sched.h
include/trace/events/skb.h
init/Kconfig
ipc/sem.c
kernel/Makefile
kernel/cgroup.c
kernel/compat.c
kernel/cpuset.c
kernel/debug/kdb/kdb_bp.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/futex_compat.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/hung_task.c
kernel/hw_breakpoint.c
kernel/irq_work.c [new file with mode: 0644]
kernel/jump_label.c [new file with mode: 0644]
kernel/kfifo.c
kernel/kprobes.c
kernel/lockdep.c
kernel/module.c
kernel/mutex.c
kernel/perf_event.c
kernel/pid.c
kernel/pm_qos_params.c
kernel/power/hibernate.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/printk.c
kernel/rcupdate.c
kernel/rcutiny.c
kernel/rcutiny_plugin.h
kernel/rcutorture.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
kernel/sched_stoptask.c [new file with mode: 0644]
kernel/signal.c
kernel/smp.c
kernel/softirq.c
kernel/srcu.c
kernel/stop_machine.c
kernel/sys.c
kernel/sysctl.c
kernel/sysctl_check.c
kernel/test_kprobes.c
kernel/timer.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_workqueue.c
kernel/tracepoint.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/bug.c
lib/dynamic_debug.c
lib/list_sort.c
lib/radix-tree.c
lib/scatterlist.c
lib/swiotlb.c
mm/Kconfig
mm/backing-dev.c
mm/bounce.c
mm/compaction.c
mm/fremap.c
mm/hugetlb.c
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/mmzone.c
mm/oom_kill.c
mm/page_alloc.c
mm/percpu.c
mm/rmap.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/8021q/vlan_core.c
net/9p/client.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/Kconfig
net/Makefile
net/atm/br2684.c
net/atm/mpc.c
net/bluetooth/l2cap.c
net/bluetooth/rfcomm/sock.c
net/caif/caif_socket.c
net/ceph/Kconfig [new file with mode: 0644]
net/ceph/Makefile [new file with mode: 0644]
net/ceph/armor.c [moved from fs/ceph/armor.c with 100% similarity]
net/ceph/auth.c [moved from fs/ceph/auth.c with 97% similarity]
net/ceph/auth_none.c [moved from fs/ceph/auth_none.c with 96% similarity]
net/ceph/auth_none.h [moved from fs/ceph/auth_none.h with 94% similarity]
net/ceph/auth_x.c [moved from fs/ceph/auth_x.c with 99% similarity]
net/ceph/auth_x.h [moved from fs/ceph/auth_x.h with 96% similarity]
net/ceph/auth_x_protocol.h [moved from fs/ceph/auth_x_protocol.h with 100% similarity]
net/ceph/buffer.c [moved from fs/ceph/buffer.c with 86% similarity]
net/ceph/ceph_common.c [new file with mode: 0644]
net/ceph/ceph_fs.c [moved from fs/ceph/ceph_fs.c with 92% similarity]
net/ceph/ceph_hash.c [moved from fs/ceph/ceph_hash.c with 98% similarity]
net/ceph/ceph_strings.c [new file with mode: 0644]
net/ceph/crush/crush.c [moved from fs/ceph/crush/crush.c with 99% similarity]
net/ceph/crush/hash.c [moved from fs/ceph/crush/hash.c with 99% similarity]
net/ceph/crush/mapper.c [moved from fs/ceph/crush/mapper.c with 99% similarity]
net/ceph/crypto.c [moved from fs/ceph/crypto.c with 99% similarity]
net/ceph/crypto.h [moved from fs/ceph/crypto.h with 95% similarity]
net/ceph/debugfs.c [new file with mode: 0644]
net/ceph/messenger.c [moved from fs/ceph/messenger.c with 89% similarity]
net/ceph/mon_client.c [moved from fs/ceph/mon_client.c with 94% similarity]
net/ceph/msgpool.c [moved from fs/ceph/msgpool.c with 95% similarity]
net/ceph/osd_client.c [moved from fs/ceph/osd_client.c with 84% similarity]
net/ceph/osdmap.c [moved from fs/ceph/osdmap.c with 97% similarity]
net/ceph/pagelist.c [new file with mode: 0644]
net/ceph/pagevec.c [new file with mode: 0644]
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/core/iovec.c
net/core/net-traces.c
net/core/skbuff.c
net/core/sock.c
net/core/stream.c
net/ipv4/Kconfig
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/datagram.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_state.c
net/irda/irlan/irlan_common.c
net/llc/af_llc.c
net/llc/llc_station.c
net/mac80211/agg-tx.c
net/mac80211/rx.c
net/mac80211/status.c
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_log.c
net/netfilter/nf_queue.c
net/netfilter/nf_tproxy_core.c
net/netfilter/xt_CT.c
net/netfilter/xt_SECMARK.c
net/phonet/pep.c
net/rds/page.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rose/af_rose.c
net/sched/cls_cgroup.c
net/sched/cls_u32.c
net/sched/sch_atm.c
net/sctp/auth.c
net/sctp/output.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sunrpc/auth.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_spkm3_mech.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
net/wireless/wext-priv.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/kfifo/dma-example.c
scripts/Makefile
scripts/Makefile.build
scripts/Makefile.lib
scripts/basic/Makefile
scripts/basic/docproc.c
scripts/basic/hash.c [deleted file]
scripts/gcc-goto.sh [new file with mode: 0644]
scripts/kconfig/conf.c
scripts/kconfig/expr.h
scripts/kconfig/menu.c
scripts/kconfig/symbol.c
scripts/kernel-doc
scripts/recordmcount.c [new file with mode: 0644]
scripts/recordmcount.h [new file with mode: 0644]
security/apparmor/.gitignore
security/apparmor/apparmorfs.c
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/capability.c
security/commoncap.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
security/keys/keyctl.c
security/security.c
security/selinux/Makefile
security/selinux/exports.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/include/security.h
security/selinux/selinuxfs.c
security/selinux/ss/Makefile [deleted file]
security/selinux/ss/avtab.c
security/selinux/ss/avtab.h
security/selinux/ss/conditional.c
security/selinux/ss/conditional.h
security/selinux/ss/ebitmap.c
security/selinux/ss/ebitmap.h
security/selinux/ss/policydb.c
security/selinux/ss/policydb.h
security/selinux/ss/services.c
security/selinux/ss/status.c [new file with mode: 0644]
security/smack/smack_lsm.c
security/tomoyo/common.c
security/tomoyo/common.h
sound/core/control.c
sound/core/pcm.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/i2c/other/ak4xxx-adda.c
sound/isa/msnd/msnd_pinnacle.c
sound/oss/soundcard.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/oxygen/oxygen.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_wm87x6.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/ppc/snd_ps3.c
sound/soc/s3c24xx/s3c-dma.c
sound/soc/sh/migor.c
sound/soc/soc-cache.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/pcm.c
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Makefile
tools/perf/builtin-annotate.c
tools/perf/builtin-report.c
tools/perf/feature-tests.mak
tools/perf/perf.h
tools/perf/scripts/python/bin/netdev-times-record [new file with mode: 0644]
tools/perf/scripts/python/bin/netdev-times-report [new file with mode: 0644]
tools/perf/scripts/python/netdev-times.py [new file with mode: 0644]
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/path.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/sort.h
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-scripting.c
tools/perf/util/ui/browser.c
tools/perf/util/ui/browser.h
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/ui/util.c
tools/perf/util/util.h
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 72b487869788c14cd40e9535b700f4be166aa12b..41d8e63d5165b5b786db6ab7d8c14fbc49fc0107 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
index ecd35e9d4410a2b8c28241331f4f96f5197b6c3a..feca0758391e145a8fb76e5a7059ce7f057e79ab 100644 (file)
@@ -46,7 +46,6 @@
 
      <sect1><title>Atomic and pointer manipulation</title>
 !Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
      </sect1>
 
      <sect1><title>Delaying, scheduling, and timer routines</title>
index a20c6f6fffc32aabb214c24ff9f53d477a265f69..6899f471fb152ebe7b405bcf48e4ff0b3f3aef93 100644 (file)
@@ -57,7 +57,6 @@
      </para>
 
      <sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
 !Elib/vsprintf.c
      </sect1>
      <sect1><title>String Manipulation</title>
index 0b1a3f97f285361a4075c8e267d42b2053747d9a..f66f4df186908f5d6ba79171e303949683107a1e 100644 (file)
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
       all the readers who were traversing the list when we deleted the
       element are finished.  We use <function>call_rcu()</function> to
       register a callback which will actually destroy the object once
-      the readers are finished.
+      all pre-existing readers are finished.  Alternatively,
+      <function>synchronize_rcu()</function> may be used to block until
+      all pre-existing readers are finished.
     </para>
     <para>
       But how does Read Copy Update know when the readers are
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
 -        object_put(obj);
 +        list_del_rcu(&amp;obj-&gt;list);
          cache_num--;
-+        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu, obj);
++        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
  }
 
  /* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
          if (++cache_num > MAX_CACHE_SIZE) {
                  struct object *i, *outcast = NULL;
                  list_for_each_entry(i, &amp;cache, list) {
-@@ -85,6 +94,7 @@
-         obj-&gt;popularity = 0;
-         atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
-         spin_lock_init(&amp;obj-&gt;lock);
-+        INIT_RCU_HEAD(&amp;obj-&gt;rcu);
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
 @@ -104,12 +114,11 @@
  struct object *cache_find(int id)
  {
@@ -1961,6 +1955,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
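
For reference, a minimal self-contained sketch of the two-argument
call_rcu() form that the example above is converted to.  The names mirror
the example; the kfree() in the callback is an assumption about what
cache_delete_rcu() ultimately does:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct object {
            struct list_head list;
            struct rcu_head rcu;
            int id;
    };

    /* Runs only after all pre-existing readers have finished. */
    static void cache_delete_rcu(struct rcu_head *head)
    {
            struct object *obj = container_of(head, struct object, rcu);

            kfree(obj);
    }

    /* Must be holding cache_lock, as in the example above. */
    static void __cache_delete(struct object *obj)
    {
            list_del_rcu(&obj->list);
            call_rcu(&obj->rcu, cache_delete_rcu);
    }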
 
index 790d1a8123760211bdcb6427b75c1b4abf2b7210..0c134f8afc6f60b1316b9551577179f1b6dc3961 100644 (file)
@@ -218,13 +218,22 @@ over a rather long period of time, but improvements are always welcome!
        include:
 
        a.      Keeping a count of the number of data-structure elements
-               used by the RCU-protected data structure, including those
-               waiting for a grace period to elapse.  Enforce a limit
-               on this number, stalling updates as needed to allow
-               previously deferred frees to complete.
-
-               Alternatively, limit only the number awaiting deferred
-               free rather than the total number of elements.
+               used by the RCU-protected data structure, including
+               those waiting for a grace period to elapse.  Enforce a
+               limit on this number, stalling updates as needed to allow
+               previously deferred frees to complete.  Alternatively,
+               limit only the number awaiting deferred free rather than
+               the total number of elements.
+
+               One way to stall the updates is to acquire the update-side
+               mutex.  (Don't try this with a spinlock -- other CPUs
+               spinning on the lock could prevent the grace period
+               from ever ending.)  Another way to stall the updates
+               is for the updates to use a wrapper function around
+               the memory allocator, so that this wrapper function
+               simulates OOM when there is too much memory awaiting an
+               RCU grace period.  There are of course many other
+               variations on this theme.
 
        b.      Limiting update rate.  For example, if updates occur only
                once per hour, then no explicit rate limiting is required,
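
One way to sketch the allocator-wrapper idea from item (a): fail
allocations while too many previously deferred frees are still waiting
for a grace period.  The names, the payload, and the limit below are
invented for illustration:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <asm/atomic.h>

    #define AWAITING_MAX 1000               /* illustrative limit */

    struct element {
            struct rcu_head rcu;
            int data;
    };

    static atomic_t awaiting_rcu = ATOMIC_INIT(0);

    /* Wrapper around the allocator: simulate OOM while too many
     * deferred frees are still waiting for a grace period. */
    static struct element *element_alloc(gfp_t gfp)
    {
            if (atomic_read(&awaiting_rcu) > AWAITING_MAX)
                    return NULL;
            return kmalloc(sizeof(struct element), gfp);
    }

    static void element_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct element, rcu));
            atomic_dec(&awaiting_rcu);
    }

    static void element_defer_free(struct element *e)
    {
            atomic_inc(&awaiting_rcu);
            call_rcu(&e->rcu, element_free_rcu);
    }
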
@@ -365,3 +374,26 @@ over a rather long period of time, but improvements are always welcome!
        and the compiler to freely reorder code into and out of RCU
        read-side critical sections.  It is the responsibility of the
        RCU update-side primitives to deal with this.
+
+17.    Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
+       the __rcu sparse checks to validate your RCU code.  These
+       can help find problems as follows:
+
+       CONFIG_PROVE_RCU: check that accesses to RCU-protected data
+               structures are carried out under the proper RCU
+               read-side critical section, while holding the right
+               combination of locks, or whatever other conditions
+               are appropriate.
+
+       CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
+               same object to call_rcu() (or friends) before an RCU
+               grace period has elapsed since the last time that you
+               passed that same object to call_rcu() (or friends).
+
+       __rcu sparse checks: tag the pointer to the RCU-protected data
+               structure with __rcu, and sparse will warn you if you
+               access that pointer without the services of one of the
+               variants of rcu_dereference().
+
+       These debugging aids can help you find problems that are
+       otherwise extremely difficult to spot.
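
As a concrete illustration of item 17, a minimal sketch of the __rcu
annotation paired with rcu_dereference() and rcu_assign_pointer(); the
structure and variable names are hypothetical, and sparse is run via
"make C=1":

    #include <linux/rcupdate.h>

    struct conf {
            int threshold;
    };

    /* __rcu tells sparse this pointer is RCU-protected. */
    static struct conf __rcu *cur_conf;

    static int read_threshold(void)
    {
            struct conf *c;
            int t;

            rcu_read_lock();
            c = rcu_dereference(cur_conf); /* plain access would warn */
            t = c ? c->threshold : 0;
            rcu_read_unlock();
            return t;
    }

    static void install_conf(struct conf *newc)
    {
            /* Publish with the matching update-side primitive. */
            rcu_assign_pointer(cur_conf, newc);
    }
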
index 44c6dcc93d6dad8e9cee2cadb9f49fa992c5eb00..862c08ef1fde4436ddac8010ba4a1aae40328719 100644 (file)
@@ -80,6 +80,24 @@ o    A CPU looping with bottom halves disabled.  This condition can
 o      For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
        without invoking schedule().
 
+o      A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
+       happen to preempt a low-priority task in the middle of an RCU
+       read-side critical section.   This is especially damaging if
+       that low-priority task is not permitted to run on any other CPU,
+       in which case the next RCU grace period can never complete, which
+       will eventually cause the system to run out of memory and hang.
+       While the system is in the process of running itself out of
+       memory, you might see stall-warning messages.
+
+o      A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
+       is running at a higher priority than the RCU softirq threads.
+       This will prevent RCU callbacks from ever being invoked,
+       and in a CONFIG_TREE_PREEMPT_RCU kernel will further prevent
+       RCU grace periods from ever completing.  Either way, the
+       system will eventually run out of memory and hang.  In the
+       CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
+       messages.
+
 o      A bug in the RCU implementation.
 
 o      A hardware failure.  This is quite unlikely, but has occurred
index efd8cc95c06b1470db165a74a0d99fd33ee17dea..a851118775d84c7a1d2356ba6a6c8e6208292887 100644 (file)
@@ -125,6 +125,17 @@ o  "b" is the batch limit for this CPU.  If more than this number
        of RCU callbacks is ready to invoke, then the remainder will
        be deferred.
 
+o      "ci" is the number of RCU callbacks that have been invoked for
+       this CPU.  Note that ci+ql is the number of callbacks that have
+       been registered in the absence of CPU-hotplug activity.
+
+o      "co" is the number of RCU callbacks that have been orphaned due to
+       this CPU going offline.
+
+o      "ca" is the number of RCU callbacks that have been adopted due to
+       other CPUs going offline.  Note that ci+co-ca+ql is the number of
+       RCU callbacks registered on this CPU.
+
 There is also an rcu/rcudata.csv file with the same information in
 comma-separated-variable spreadsheet format.
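
As a worked example with illustrative numbers: a line showing ci=1530,
co=5, ca=3 and a queue length ql=12 corresponds to ci+co-ca+ql =
1530+5-3+12 = 1544 callbacks registered on that CPU; with no CPU-hotplug
activity, co and ca remain 0 and the count reduces to ci+ql.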
 
@@ -180,7 +191,7 @@ o   "s" is the "signaled" state that drives force_quiescent_state()'s
 
 o      "jfq" is the number of jiffies remaining for this grace period
        before force_quiescent_state() is invoked to help push things
-       along.  Note that CPUs in dyntick-idle mode thoughout the grace
+       along.  Note that CPUs in dyntick-idle mode throughout the grace
       period will not report on their own, but rather must be checked by
        some other CPU via force_quiescent_state().
 
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644 (file)
index 0000000..e578fee
--- /dev/null
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for the next request on certain cfq
+queues (for sequential workloads) and service trees (for random workloads)
+before the queue is expired and CFQ selects the next queue to dispatch from.
+
+By default slice_idle is a non-zero value, which means that by default we
+idle on queues/service trees. This can be very helpful on highly seeky media
+like single-spindle SATA/SAS disks, where we can cut down on the overall
+number of seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all idling at the queue/service-tree
+level, and one should see overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in a hardware RAID configuration. The
+downside is that the isolation provided from WRITES also goes down and the
+notion of IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general, for SATA/SAS disks and software RAID of SATA/SAS disks, keeping
+slice_idle enabled should be useful. For any configuration where there are
+multiple spindles behind a single LUN (host-based hardware RAID controller
+or storage arrays), setting slice_idle=0 might result in better throughput
+and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+The basic CFQ design is to provide priority-based time slices: a higher
+priority process gets a bigger time slice and a lower priority process gets
+a smaller one. Measuring time becomes harder if the storage is fast and
+supports NCQ, where it is better to dispatch multiple requests from multiple
+cfq queues into the request queue at a time. In such a scenario, it is not
+possible to accurately measure the time consumed by a single queue.
+
+What is possible, though, is to measure the number of requests dispatched
+from a single queue while also allowing dispatch from multiple cfq queues
+at the same time. This effectively becomes fairness in terms of IOPS
+(IO operations per second).
+
+If one sets slice_idle=0 and the storage supports NCQ, CFQ internally
+switches to IOPS mode and starts providing fairness in terms of the number
+of requests dispatched. Note that this mode switch takes effect only for
+group scheduling. For non-cgroup users nothing should change.
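
A small userspace sketch of flipping this tunable, equivalent to
"echo 0 > /sys/block/sda/queue/iosched/slice_idle"; the device name sda is
a placeholder, and the file exists only while CFQ is the active scheduler
for that disk:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/queue/iosched/slice_idle", "w");

            if (!f) {
                    perror("slice_idle");
                    return 1;
            }
            fputs("0\n", f);                /* switch CFQ to IOPS mode */
            return fclose(f) ? 1 : 0;
    }
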
index 48e0b21b00594dac1971472bef9105e04d2bfa77..6919d62591d97580d3132f539e0edd50d9985449 100644 (file)
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On faster hardware CFQ can be slow, especially with sequential workloads.
+This happens because CFQ idles on a single queue, and a single queue might
+not drive deep enough request queue depths to keep the storage busy. In such
+scenarios one can try setting slice_idle=0; that switches CFQ to IOPS
+(IO operations per second) mode on NCQ-supporting hardware.
+
+That means CFQ will not idle between the cfq queues of a cfq group, and
+hence will be able to drive higher queue depths and achieve better
+throughput. It also means that cfq provides fairness among groups in terms
+of IOPS rather than disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is the same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop after creating multiple groups
+and putting applications in those groups that are not driving enough IO to
+keep the disk busy. In that case set group_idle=0, and CFQ will not idle on
+individual groups, and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are supported. All the buffered writes are
index f1c5c4bccd3e8ed6674903eedbc59db3cf76ff9e..902d3151f527919ab190d8f30ff9253e3da2c8d0 100644 (file)
@@ -14,25 +14,39 @@ to /proc/cpuinfo.
        identifier (rather than the kernel's).  The actual value is
        architecture and platform dependent.
 
-3) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
+3) /sys/devices/system/cpu/cpuX/topology/book_id:
+
+       the book ID of cpuX. Typically it is the hardware platform's
+       identifier (rather than the kernel's).  The actual value is
+       architecture and platform dependent.
+
+4) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
 
       internal kernel map of cpuX's hardware threads within the same
        core as cpuX
 
-4) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+5) /sys/devices/system/cpu/cpuX/topology/core_siblings:
 
        internal kernel map of cpuX's hardware threads within the same
        physical_package_id.
 
+6) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+
+       internal kernel map of cpuX's hardware threads within the same
+       book_id.
+
 To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 4 attributes.
+drivers/base/topology.c, is to export the 4 or 6 attributes. The two
+book-related sysfs files will only be created if CONFIG_SCHED_BOOK is selected.
 
 For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
+#define topology_book_id(cpu)
 #define topology_thread_cpumask(cpu)
 #define topology_core_cpumask(cpu)
+#define topology_book_cpumask(cpu)
 
 The type of **_id is int.
 The type of siblings is (const) struct cpumask *.
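+
+For a quick look at these attributes on a running system, the sysfs
+files can be read directly (cpu0 is just an illustrative choice):
+
+        grep . /sys/devices/system/cpu/cpu0/topology/*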
@@ -45,6 +59,9 @@ not defined by include/asm-XXX/topology.h:
 3) thread_siblings: just the given CPU
 4) core_siblings: just the given CPU
 
+For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
+default definitions for topology_book_id() and topology_book_cpumask().
+
 Additionally, CPU topology information is provided under
 /sys/devices/system/cpu and includes these files.  The internal
 source for the output is in brackets ("[]").
index 842aa9de84a603298f74f9f4b8c036e0104001cf..5e2bc4ab897a1df72aa03849809747afa68ec142 100644 (file)
@@ -386,34 +386,6 @@ Who:       Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:  Support for VMware's guest paravirtuliazation technique [VMI] will be
-       dropped.
-When:  2.6.37 or earlier.
-Why:   With the recent innovations in CPU hardware acceleration technologies
-       from Intel and AMD, VMware ran a few experiments to compare these
-       techniques to guest paravirtualization technique on VMware's platform.
-       These hardware assisted virtualization techniques have outperformed the
-       performance benefits provided by VMI in most of the workloads. VMware
-       expects that these hardware features will be ubiquitous in a couple of
-       years, as a result, VMware has started a phased retirement of this
-       feature from the hypervisor. We will be removing this feature from the
-       Kernel too. Right now we are targeting 2.6.37 but can retire earlier if
-       technical reasons (read opportunity to remove major chunk of pvops)
-       arise.
-
-       Please note that VMI has always been an optimization and non-VMI kernels
-       still work fine on VMware's platform.
-       Latest versions of VMware's product which support VMI are,
-       Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence
-       releases for these products will continue supporting VMI.
-
-       For more details about VMI retirement take a look at this,
-       http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
-
-Who:   Alok N Kataria <akataria@vmware.com>
-
-----------------------------
-
 What:  Support for lcd_switch and display_get in asus-laptop driver
 When:  March 2010
 Why:   These two features use non-standard interfaces. There are the
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test if such a number from such a structure could reference a GPIO, you
+may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the
+space of GPIO numbers and whether new controllers can be added at runtime.
+Such issues can affect, among other things, whether adjacent GPIO numbers
+are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.)
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
index ff45d1f837c89ab6706726ae525083e22d05af16..48ceabedf55df87dddff2d62864f6f93db9d315b 100644 (file)
@@ -91,12 +91,11 @@ name                The chip name.
                I2C devices get this attribute created automatically.
                RO
 
-update_rate    The rate at which the chip will update readings.
+update_interval        The interval at which the chip will update readings.
                Unit: millisecond
                RW
-               Some devices have a variable update rate. This attribute
-               can be used to change the update rate to the desired
-               frequency.
+               Some devices have a variable update rate or interval.
+               This attribute can be used to change it to the desired value.
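+
+               For example, to request one reading per second on a
+               hypothetical first hwmon device:
+
+               echo 1000 > /sys/class/hwmon/hwmon0/device/update_interval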
 
 
 ************
index 27a52b35d55bf1b6009551c97f400da5c14f59bc..3d8a97747f7731c801ca7d3a1483858feeb76b6c 100644 (file)
@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed.
 section titled <section title> from <filename>.
 Spaces are allowed in <section title>; do not quote the <section title>.
 
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This is useful when you use only !F/!P and want to verify
+that all documentation is included.
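+
+For example, a template that pulls in only selected functions might
+contain (paths and names are illustrative):
+
+!Fdrivers/foo/bar.c foo_init foo_exit
+!Cdrivers/foo/bar.c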
+
 Tim.
 */ <twaugh@redhat.com>
index 8dd7248508a9e12ac8908c24fb4a3649f157697a..3a0009e03d14bae7d9f6ccbf242675b92ce38c26 100644 (file)
@@ -455,7 +455,7 @@ and is between 256 and 4096 characters. It is defined in the file
                        [ARM] imx_timer1,OSTS,netx_timer,mpu_timer2,
                                pxa_timer,timer3,32k_counter,timer0_1
                        [AVR32] avr32
-                       [X86-32] pit,hpet,tsc,vmi-timer;
+                       [X86-32] pit,hpet,tsc;
                                scx200_hrt on Geode; cyclone on IBM x440
                        [MIPS] MIPS
                        [PARISC] cr16
@@ -2153,6 +2153,11 @@ and is between 256 and 4096 characters. It is defined in the file
                        Reserves a hole at the top of the kernel virtual
                        address space.
 
+       reservelow=     [X86]
+                       Format: nn[K]
+                       Set the amount of memory to reserve for BIOS at
+                       the bottom of the address space.
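+			Example (illustrative value): reservelow=64K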
+
        reset_devices   [KNL] Force drivers to reset the underlying device
                        during initialization.
 
@@ -2435,6 +2440,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        disables clocksource verification at runtime.
                        Used to enable high-resolution timer mode on older
                        hardware, and in virtualized environment.
+			[x86] noirqtime: Do not use TSC to do irq accounting.
+			Used to disable IRQ_TIME_ACCOUNTING at runtime on
+			platforms where RDTSC is slow and this accounting
+			adds overhead.
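+			Example: tsc=noirqtime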
 
        turbografx.map[2|3]=    [HW,JOY]
                        TurboGraFX parallel port interface
index 1762b81fcdf2ec4a235423687865453a16196aed..741fe66d6eca4852894cac79d6ad2d3162486c7a 100644 (file)
@@ -542,9 +542,11 @@ Kprobes does not use mutexes or allocate memory except during
 registration and unregistration.
 
 Probe handlers are run with preemption disabled.  Depending on the
-architecture, handlers may also run with interrupts disabled.  In any
-case, your handler should not yield the CPU (e.g., by attempting to
-acquire a semaphore).
+architecture and optimization state, handlers may also run with
+interrupts disabled (e.g., kretprobe handlers and optimized kprobe
+handlers run without interrupts disabled on x86/x86-64).  In any case,
+your handler should not yield the CPU (e.g., by attempting to acquire
+a semaphore).
 
 Since a return probe is implemented by replacing the return
 address with the trampoline's address, stack backtraces and calls
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
index 2df71861e578b6ebcb2b94371d4a32fbdf7fca61..d9271e74e488a54177c548a5ad859d51ffdaff28 100644 (file)
@@ -1,82 +1,35 @@
 Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
 ===============================================================
 
-September 26, 2006
-
+Intel Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
 
 Contents
 ========
 
-- In This Release
 - Identifying Your Adapter
-- Building and Installation
 - Command Line Parameters
 - Speed and Duplex Configuration
 - Additional Configurations
-- Known Issues
 - Support
 
-
-In This Release
-===============
-
-This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family
-of Adapters.  This driver includes support for Itanium(R)2-based systems.
-
-For questions related to hardware requirements, refer to the documentation
-supplied with your Intel PRO/1000 adapter. All hardware requirements listed
-apply to use with Linux.
-
-The following features are now available in supported kernels:
- - Native VLANs
- - Channel Bonding (teaming)
- - SNMP
-
-Channel Bonding documentation can be found in the Linux kernel source:
-/Documentation/networking/bonding.txt
-
-The driver information previously displayed in the /proc filesystem is not
-supported in this release.  Alternatively, you can use ethtool (version 1.6
-or later), lspci, and ifconfig to obtain the same information.
-
-Instructions on updating ethtool can be found in the section "Additional
-Configurations" later in this document.
-
-NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
-support.
-
-
 Identifying Your Adapter
 ========================
 
 For more information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
 
-    http://support.intel.com/support/network/adapter/pro100/21397.htm
+    http://support.intel.com/support/go/network/adapter/idguide.htm
 
 For the latest Intel network drivers for Linux, refer to the following
 website.  In the search field, enter your adapter name or type, or use the
 networking link on the left to search for your adapter:
 
-    http://downloadfinder.intel.com/scripts-df/support_intel.asp
-
+    http://support.intel.com/support/go/network/adapter/home.htm
 
 Command Line Parameters
 =======================
 
-If the driver is built as a module, the  following optional parameters
-are used by entering them on the command line with the modprobe command
-using this syntax:
-
-     modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
-
-For example, with two PRO/1000 PCI adapters, entering:
-
-     modprobe e1000 TxDescriptors=80,128
-
-loads the e1000 driver with 80 TX descriptors for the first adapter and
-128 TX descriptors for the second adapter.
-
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
@@ -89,10 +42,6 @@ NOTES:  For more information about the AutoNeg, Duplex, and Speed
         parameters, see the application note at:
         http://www.intel.com/design/network/applnots/ap450.htm
 
-        A descriptor describes a data buffer and attributes related to
-        the data buffer.  This information is accessed by the hardware.
-
-
 AutoNeg
 -------
 (Supported only on adapters with copper connections)
@@ -106,7 +55,6 @@ Duplex parameters must not be specified.
 NOTE:  Refer to the Speed and Duplex section of this readme for more
        information on the AutoNeg parameter.
 
-
 Duplex
 ------
 (Supported only on adapters with copper connections)
@@ -119,7 +67,6 @@ set to auto-negotiate, the board auto-detects the correct duplex.  If the
 link partner is forced (either full or half), Duplex defaults to half-
 duplex.
 
-
 FlowControl
 -----------
 Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
@@ -128,16 +75,16 @@ Default Value: Reads flow control settings from the EEPROM
 This parameter controls the automatic generation(Tx) and response(Rx)
 to Ethernet PAUSE frames.
 
-
 InterruptThrottleRate
 ---------------------
 (not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range:   0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+                                   4=simplified balancing)
 Default Value: 3
 
 The driver can limit the amount of interrupts per second that the adapter
-will generate for incoming packets. It does this by writing a value to the 
-adapter that is based on the maximum amount of interrupts that the adapter 
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
 will generate per second.
 
 Setting InterruptThrottleRate to a value greater or equal to 100
@@ -146,37 +93,43 @@ per second, even if more packets have come in. This reduces interrupt
 load on the system and can lower CPU utilization under heavy load,
 but will increase latency as packets are not processed as quickly.
 
-The default behaviour of the driver previously assumed a static 
-InterruptThrottleRate value of 8000, providing a good fallback value for 
-all traffic types,but lacking in small packet performance and latency. 
-The hardware can handle many more small packets per second however, and 
+The default behaviour of the driver previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types, but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second however, and
 for this reason an adaptive interrupt moderation algorithm was implemented.
 
 Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
-it dynamically adjusts the InterruptThrottleRate value based on the traffic 
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
 that it receives. After determining the type of incoming traffic in the last
-timeframe, it will adjust the InterruptThrottleRate to an appropriate value 
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
 for that traffic.
 
 The algorithm classifies the incoming traffic every interval into
-classes.  Once the class is determined, the InterruptThrottleRate value is 
-adjusted to suit that traffic type the best. There are three classes defined: 
+classes.  Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
 "Bulk traffic", for large amounts of packets of normal size; "Low latency",
 for small amounts of traffic and/or a significant percentage of small
-packets; and "Lowest latency", for almost completely small packets or 
+packets; and "Lowest latency", for almost completely small packets or
 minimal traffic.
 
-In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 
-for traffic that falls in class "Bulk traffic". If traffic falls in the "Low 
-latency" or "Lowest latency" class, the InterruptThrottleRate is increased 
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
 stepwise to 20000. This default mode is suitable for most applications.
 
 For situations where low latency is vital such as cluster or
 grid computing, the algorithm can reduce latency even more when
 InterruptThrottleRate is set to mode 1. In this mode, which operates
-the same as mode 3, the InterruptThrottleRate will be increased stepwise to 
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
 70000 for traffic in class "Lowest latency".
 
+In simplified mode the interrupt rate is based on the ratio of Tx and
+Rx traffic.  If the bytes per second rate is approximately equal, the
+interrupt rate will drop as low as 2000 interrupts per second.  If the
+traffic is mostly transmit or mostly receive, the interrupt rate could
+be as high as 8000.
+
 Setting InterruptThrottleRate to 0 turns off any interrupt moderation
 and may improve small packet latency, but is generally not suitable
 for bulk throughput traffic.
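+
+For example, to select the simplified balancing mode on a single
+adapter (a sketch; mode 4 is from the valid range above):
+
+     modprobe e1000 InterruptThrottleRate=4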
@@ -212,8 +165,6 @@ NOTE:  When e1000 is loaded with default settings and multiple adapters
        be platform-specific.  If CPU utilization is not a concern, use
        RX_POLLING (NAPI) and default driver settings.
 
-
-
 RxDescriptors
 -------------
 Valid Range:   80-256 for 82542 and 82543-based adapters
@@ -225,15 +176,14 @@ by the driver.  Increasing this value allows the driver to buffer more
 incoming packets, at the expense of increased system memory utilization.
 
 Each descriptor is 16 bytes.  A receive buffer is also allocated for each
-descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending 
+descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
 on the MTU setting. The maximum MTU size is 16110.
 
-NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo 
-       Frames.  Depending on the available system resources, the request 
-       for a higher number of receive descriptors may be denied.  In this 
+NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo
+       Frames.  Depending on the available system resources, the request
+       for a higher number of receive descriptors may be denied.  In this
        case, use a lower number.
 
-
 RxIntDelay
 ----------
 Valid Range:   0-65535 (0=off)
@@ -254,7 +204,6 @@ CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
           restoring the network connection.  To eliminate the potential
           for the hang ensure that RxIntDelay is set to 0.
 
-
 RxAbsIntDelay
 -------------
 (This parameter is supported only on 82540, 82545 and later adapters.)
@@ -268,7 +217,6 @@ packet is received within the set amount of time.  Proper tuning,
 along with RxIntDelay, may improve traffic throughput in specific network
 conditions.
 
-
 Speed
 -----
 (This parameter is supported only on adapters with copper connections.)
@@ -280,7 +228,6 @@ Speed forces the line speed to the specified value in megabits per second
 partner is set to auto-negotiate, the board will auto-detect the correct
 speed.  Duplex should also be set when Speed is set to either 10 or 100.
 
-
 TxDescriptors
 -------------
 Valid Range:   80-256 for 82542 and 82543-based adapters
@@ -295,6 +242,36 @@ NOTE:  Depending on the available system resources, the request for a
        higher number of transmit descriptors may be denied.  In this case,
        use a lower number.
 
+TxDescriptorStep
+----------------
+Valid Range:    1 (use every Tx Descriptor)
+               4 (use every 4th Tx Descriptor)
+
+Default Value:  1 (use every Tx Descriptor)
+
+On certain non-Intel architectures, it has been observed that intense TX
+traffic bursts of short packets may result in an improper descriptor
+writeback. If this occurs, the driver will report a "TX Timeout" and reset
+the adapter, after which the transmit flow will restart, though data may
+have stalled for as much as 10 seconds before it resumes.
+
+The improper writeback does not occur on the first descriptor in a system
+memory cache-line, which is typically 32 bytes, or 4 descriptors long.
+
+Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors
+are aligned to the start of a system memory cache line, and so this problem
+will not occur.
+
+NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of
+       TxDescriptors available for transmits to 1/4 of the normal allocation.
+       This has a possible negative performance impact, which may be
+       compensated for by allocating more descriptors using the TxDescriptors
+       module parameter.
+
+       There are other conditions which may result in "TX Timeout", which will
+       not be resolved by the use of the TxDescriptorStep parameter. As the
+       issue addressed by this parameter has never been observed on Intel
+       Architecture platforms, it should not be used on Intel platforms.
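+
+For example, on an affected non-Intel platform the driver might be
+loaded with (illustrative values):
+
+     modprobe e1000 TxDescriptorStep=4 TxDescriptors=1024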
 
 TxIntDelay
 ----------
@@ -307,7 +284,6 @@ efficiency if properly tuned for specific network traffic.  If the
 system is reporting dropped transmits, this value may be set too high
 causing the driver to run out of available transmit descriptors.
 
-
 TxAbsIntDelay
 -------------
 (This parameter is supported only on 82540, 82545 and later adapters.)
@@ -330,6 +306,35 @@ Default Value: 1
 A value of '1' indicates that the driver should enable IP checksum
 offload for received packets (both UDP and TCP) to the adapter hardware.
 
+Copybreak
+---------
+Valid Range:   0-xxxxxxx (0=off)
+Default Value: 256
+Usage: insmod e1000.ko copybreak=128
+
+The driver copies all packets of this size or smaller to a fresh Rx
+buffer before handing them up the stack.
+
+This parameter is different from other parameters in that it is a
+single parameter applied to all driver instances (not a 1,1,1 etc. list)
+and it is also available at runtime at
+/sys/module/e1000/parameters/copybreak
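+
+For example, to change the value at runtime (a sketch):
+
+     echo 128 > /sys/module/e1000/parameters/copybreak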
+
+SmartPowerDownEnable
+--------------------
+Valid Range: 0-1
+Default Value:  0 (disabled)
+
+Allows the PHY to turn off in lower power states. The user can set
+this parameter in supported chipsets.
+
+KumeranLockLoss
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+This workaround skips resetting the PHY at shutdown for the initial
+silicon releases of ICH8 systems.
 
 Speed and Duplex Configuration
 ==============================
@@ -385,40 +390,9 @@ If the link partner is forced to a specific speed and duplex, then this
 parameter should not be used.  Instead, use the Speed and Duplex parameters
 previously mentioned to force the adapter to the same speed and duplex.
 
-
 Additional Configurations
 =========================
 
-  Configuring the Driver on Different Distributions
-  -------------------------------------------------
-  Configuring a network driver to load properly when the system is started
-  is distribution dependent.  Typically, the configuration process involves
-  adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
-  as editing other system startup scripts and/or configuration files.  Many
-  popular Linux distributions ship with tools to make these changes for you.
-  To learn the proper way to configure a network device for your system,
-  refer to your distribution documentation.  If during this process you are
-  asked for the driver or module name, the name for the Linux Base Driver
-  for the Intel(R) PRO/1000 Family of Adapters is e1000.
-
-  As an example, if you install the e1000 driver for two PRO/1000 adapters
-  (eth0 and eth1) and set the speed and duplex to 10full and 100half, add
-  the following to modules.conf or or modprobe.conf:
-
-       alias eth0 e1000
-       alias eth1 e1000
-       options e1000 Speed=10,100 Duplex=2,1
-
-  Viewing Link Messages
-  ---------------------
-  Link messages will not be displayed to the console if the distribution is
-  restricting system messages.  In order to see network driver link messages
-  on your console, set dmesg to eight by entering the following:
-
-       dmesg -n 8
-
-  NOTE: This setting is not saved across reboots.
-
   Jumbo Frames
   ------------
   Jumbo Frames support is enabled by changing the MTU to a value larger than
@@ -437,9 +411,11 @@ Additional Configurations
    setting in a different location.
 
   Notes:
-
-  - To enable Jumbo Frames, increase the MTU size on the interface beyond
-    1500.
+  - Degradation in throughput performance may be observed in some Jumbo frames
+    environments. If this is observed, increasing the application's socket
+    buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
+    may help. See the specific application manual and
+    /usr/src/linux*/Documentation/networking/ip-sysctl.txt for more details.
 
   - The maximum MTU setting for Jumbo Frames is 16110.  This value coincides
     with the maximum Jumbo Frames size of 16128.
@@ -447,40 +423,11 @@ Additional Configurations
   - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
     loss of link.
 
-  - Some Intel gigabit adapters that support Jumbo Frames have a frame size
-    limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
-    The adapters with this limitation are based on the Intel(R) 82571EB,
-    82572EI, 82573L and 80003ES2LAN controller.  These correspond to the
-    following product names:
-     Intel(R) PRO/1000 PT Server Adapter
-     Intel(R) PRO/1000 PT Desktop Adapter
-     Intel(R) PRO/1000 PT Network Connection
-     Intel(R) PRO/1000 PT Dual Port Server Adapter
-     Intel(R) PRO/1000 PT Dual Port Network Connection
-     Intel(R) PRO/1000 PF Server Adapter
-     Intel(R) PRO/1000 PF Network Connection
-     Intel(R) PRO/1000 PF Dual Port Server Adapter
-     Intel(R) PRO/1000 PB Server Connection
-     Intel(R) PRO/1000 PL Network Connection
-     Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
-     Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
-     Intel(R) PRO/1000 PT Quad Port Server Adapter
-
   - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
     support Jumbo Frames. These correspond to the following product names:
      Intel(R) PRO/1000 Gigabit Server Adapter
      Intel(R) PRO/1000 PM Network Connection
 
-  - The following adapters do not support Jumbo Frames:
-     Intel(R) 82562V 10/100 Network Connection
-     Intel(R) 82566DM Gigabit Network Connection
-     Intel(R) 82566DC Gigabit Network Connection
-     Intel(R) 82566MM Gigabit Network Connection
-     Intel(R) 82566MC Gigabit Network Connection
-     Intel(R) 82562GT 10/100 Network Connection
-     Intel(R) 82562G 10/100 Network Connection
-
-
   Ethtool
   -------
   The driver utilizes the ethtool interface for driver configuration and
@@ -490,142 +437,14 @@ Additional Configurations
   The latest release of ethtool can be found from
   http://sourceforge.net/projects/gkernel.
 
-  NOTE: Ethtool 1.6 only supports a limited set of ethtool options.  Support
-  for a more complete ethtool feature set can be enabled by upgrading
-  ethtool to ethtool-1.8.1.
-
   Enabling Wake on LAN* (WoL)
   ---------------------------
-  WoL is configured through the Ethtool* utility.  Ethtool is included with
-  all versions of Red Hat after Red Hat 7.2.  For other Linux distributions,
-  download and install Ethtool from the following website:
-  http://sourceforge.net/projects/gkernel.
-
-  For instructions on enabling WoL with Ethtool, refer to the website listed
-  above.
+  WoL is configured through the Ethtool* utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
   For this driver version, in order to enable WoL, the e1000 driver must be
   loaded when shutting down or rebooting the system.
 
-  Wake On LAN is only supported on port A for the following devices:
-  Intel(R) PRO/1000 PT Dual Port Network Connection
-  Intel(R) PRO/1000 PT Dual Port Server Connection
-  Intel(R) PRO/1000 PT Dual Port Server Adapter
-  Intel(R) PRO/1000 PF Dual Port Server Adapter
-  Intel(R) PRO/1000 PT Quad Port Server Adapter
-
-  NAPI
-  ----
-  NAPI (Rx polling mode) is enabled in the e1000 driver.
-
-  See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
-
-
-Known Issues
-============
-
-Dropped Receive Packets on Half-duplex 10/100 Networks
-------------------------------------------------------
-If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half-
-duplex, you may observe occasional dropped receive packets.  There are no
-workarounds for this problem in this network configuration.  The network must
-be updated to operate in full-duplex, and/or 1000mbps only.
-
-Jumbo Frames System Requirement
--------------------------------
-Memory allocation failures have been observed on Linux systems with 64 MB
-of RAM or less that are running Jumbo Frames.  If you are using Jumbo
-Frames, your system may require more than the advertised minimum
-requirement of 64 MB of system memory.
-
-Performance Degradation with Jumbo Frames
------------------------------------------
-Degradation in throughput performance may be observed in some Jumbo frames
-environments.  If this is observed, increasing the application's socket
-buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
-may help.  See the specific application manual and
-/usr/src/linux*/Documentation/
-networking/ip-sysctl.txt for more details.
-
-Jumbo Frames on Foundry BigIron 8000 switch
--------------------------------------------
-There is a known issue using Jumbo frames when connected to a Foundry
-BigIron 8000 switch.  This is a 3rd party limitation.  If you experience
-loss of packets, lower the MTU size.
-
-Allocating Rx Buffers when Using Jumbo Frames 
----------------------------------------------
-Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if 
-the available memory is heavily fragmented. This issue may be seen with PCI-X 
-adapters or with packet split disabled. This can be reduced or eliminated 
-by changing the amount of available memory for receive buffer allocation, by
-increasing /proc/sys/vm/min_free_kbytes. 
-
-Multiple Interfaces on Same Ethernet Broadcast Network
-------------------------------------------------------
-Due to the default ARP behavior on Linux, it is not possible to have
-one system on two IP networks in the same Ethernet broadcast domain
-(non-partitioned switch) behave as expected.  All Ethernet interfaces
-will respond to IP traffic for any IP address assigned to the system.
-This results in unbalanced receive traffic.
-
-If you have multiple interfaces in a server, either turn on ARP
-filtering by entering:
-
-    echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-(this only works if your kernel's version is higher than 2.4.5),
-
-NOTE: This setting is not saved across reboots.  The configuration
-change can be made permanent by adding the line:
-    net.ipv4.conf.all.arp_filter = 1
-to the file /etc/sysctl.conf
-
-      or,
-
-install the interfaces in separate broadcast domains (either in
-different switches or in a switch partitioned to VLANs).
-
-82541/82547 can't link or are slow to link with some link partners
------------------------------------------------------------------
-There is a known compatibility issue with 82541/82547 and some
-low-end switches where the link will not be established, or will
-be slow to establish.  In particular, these switches are known to
-be incompatible with 82541/82547:
-
-    Planex FXG-08TE
-    I-O Data ETG-SH8
-
-To workaround this issue, the driver can be compiled with an override
-of the PHY's master/slave setting.  Forcing master or forcing slave
-mode will improve time-to-link.
-
-    # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
-
-Where <n> is:
-
-    0 = Hardware default
-    1 = Master mode
-    2 = Slave mode
-    3 = Auto master/slave
-
-Disable rx flow control with ethtool
-------------------------------------
-In order to disable receive flow control using ethtool, you must turn
-off auto-negotiation on the same command line.
-
-For example:
-
-   ethtool -A eth? autoneg off rx off
-
-Unplugging network cable while ethtool -p is running
-----------------------------------------------------
-In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging
-the network cable while ethtool -p is running will cause the system to
-become unresponsive to keyboard commands, except for control-alt-delete.
-Restarting the system appears to be the only remedy.
-
-
 Support
 =======
 
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
new file mode 100644 (file)
index 0000000..6aa048b
--- /dev/null
@@ -0,0 +1,302 @@
+Linux* Driver for Intel(R) Network Connection
+===============================================================
+
+Intel Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Command Line Parameters
+- Additional Configurations
+- Support
+
+Identifying Your Adapter
+========================
+
+The e1000e driver supports all PCI Express Intel(R) Gigabit Network
+Connections, except those that are 82575, 82576 and 82580-based*.
+
+* NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by
+  the e1000 driver, not the e1000e driver, due to the 82546 part being used
+  behind a PCI Express bridge.
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+    http://support.intel.com/support/go/network/adapter/idguide.htm
+
+For the latest Intel network drivers for Linux, refer to the following
+website.  In the search field, enter your adapter name or type, or use the
+networking link on the left to search for your adapter:
+
+    http://support.intel.com/support/go/network/adapter/home.htm
+
+Command Line Parameters
+=======================
+
+The default value for each parameter is generally the recommended setting,
+unless otherwise noted.
+
+NOTES:  For more information about the InterruptThrottleRate,
+        RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay
+        parameters, see the application note at:
+        http://www.intel.com/design/network/applnots/ap450.htm
+
+InterruptThrottleRate
+---------------------
+Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+                                   4=simplified balancing)
+Default Value: 3
+
+The driver can limit the amount of interrupts per second that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
+will generate per second.
+
+Setting InterruptThrottleRate to a value greater or equal to 100
+will program the adapter to send out a maximum of that many interrupts
+per second, even if more packets have come in. This reduces interrupt
+load on the system and can lower CPU utilization under heavy load,
+but will increase latency as packets are not processed as quickly.
+
+The driver has two adaptive modes (setting 1 or 3) in which
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
+that it receives. After determining the type of incoming traffic in the last
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+for that traffic.
+
+The algorithm classifies the incoming traffic every interval into
+classes.  Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
+"Bulk traffic", for large amounts of packets of normal size; "Low latency",
+for small amounts of traffic and/or a significant percentage of small
+packets; and "Lowest latency", for almost completely small packets or
+minimal traffic.
+
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
+stepwise to 20000. This default mode is suitable for most applications.
+
+For situations where low latency is vital such as cluster or
+grid computing, the algorithm can reduce latency even more when
+InterruptThrottleRate is set to mode 1. In this mode, which operates
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
+70000 for traffic in class "Lowest latency".
+
+In simplified mode the interrupt rate is based on the ratio of Tx and
+Rx traffic.  If the bytes per second rate is approximately equal, the
+interrupt rate will drop as low as 2000 interrupts per second.  If the
+traffic is mostly transmit or mostly receive, the interrupt rate could
+be as high as 8000.
+
+Setting InterruptThrottleRate to 0 turns off any interrupt moderation
+and may improve small packet latency, but is generally not suitable
+for bulk throughput traffic.
+
+NOTE:  InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+       RxAbsIntDelay parameters.  In other words, minimizing the receive
+       and/or transmit absolute delays does not force the controller to
+       generate more interrupts than what the Interrupt Throttle Rate
+       allows.
+
+NOTE:  When e1000e is loaded with default settings and multiple adapters
+       are in use simultaneously, the CPU utilization may increase non-
+       linearly.  In order to limit the CPU utilization without impacting
+       the overall throughput, we recommend that you load the driver as
+       follows:
+
+           modprobe e1000e InterruptThrottleRate=3000,3000,3000
+
+       This sets the InterruptThrottleRate to 3000 interrupts/sec for
+       the first, second, and third instances of the driver.  The range
+       of 2000 to 3000 interrupts per second works on a majority of
+       systems and is a good starting point, but the optimal value will
+       be platform-specific.  If CPU utilization is not a concern, use
+       RX_POLLING (NAPI) and default driver settings.
+
+RxIntDelay
+----------
+Valid Range:   0-65535 (0=off)
+Default Value: 0
+
+This value delays the generation of receive interrupts in units of 1.024
+microseconds.  Receive interrupt reduction can improve CPU efficiency if
+properly tuned for specific network traffic.  Increasing this value adds
+extra latency to frame reception and can end up decreasing the throughput
+of TCP traffic.  If the system is reporting dropped receives, this value
+may be set too high, causing the driver to run out of available receive
+descriptors.
+
+CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
+          hang (stop transmitting) under certain network conditions.  If
+          this occurs a NETDEV WATCHDOG message is logged in the system
+          event log.  In addition, the controller is automatically reset,
+          restoring the network connection.  To eliminate the potential
+          for the hang ensure that RxIntDelay is set to 0.
+
+RxAbsIntDelay
+-------------
+Valid Range:   0-65535 (0=off)
+Default Value: 8
+
+This value, in units of 1.024 microseconds, limits the delay in which a
+receive interrupt is generated.  Useful only if RxIntDelay is non-zero,
+this value ensures that an interrupt is generated after the initial
+packet is received within the set amount of time.  Proper tuning,
+along with RxIntDelay, may improve traffic throughput in specific network
+conditions.
+
+TxIntDelay
+----------
+Valid Range:   0-65535 (0=off)
+Default Value: 8
+
+This value delays the generation of transmit interrupts in units of
+1.024 microseconds.  Transmit interrupt reduction can improve CPU
+efficiency if properly tuned for specific network traffic.  If the
+system is reporting dropped transmits, this value may be set too high
+causing the driver to run out of available transmit descriptors.
+
+TxAbsIntDelay
+-------------
+Valid Range:   0-65535 (0=off)
+Default Value: 32
+
+This value, in units of 1.024 microseconds, limits the delay in which a
+transmit interrupt is generated.  Useful only if TxIntDelay is non-zero,
+this value ensures that an interrupt is generated after the initial
+packet is sent on the wire within the set amount of time.  Proper tuning,
+along with TxIntDelay, may improve traffic throughput in specific
+network conditions.
+
+Copybreak
+---------
+Valid Range:   0-xxxxxxx (0=off)
+Default Value: 256
+
+The driver copies all packets of this size or smaller to a fresh Rx
+buffer before handing them up the stack.
+
+This parameter is different from other parameters in that it is a
+single parameter applied to all driver instances (not a 1,1,1 etc. list)
+and it is also available at runtime at
+/sys/module/e1000e/parameters/copybreak
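+
+For example, to change the value at runtime (a sketch):
+
+     echo 128 > /sys/module/e1000e/parameters/copybreak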
+
+SmartPowerDownEnable
+--------------------
+Valid Range: 0-1
+Default Value:  0 (disabled)
+
+Allows PHY to turn off in lower power states. The user can set this parameter
+in supported chipsets.
+
+KumeranLockLoss
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+This workaround skips resetting the PHY at shutdown for the initial
+silicon releases of ICH8 systems.
+
+IntMode
+-------
+Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X)
+Default Value: 2
+
+Allows changing the interrupt mode at module load time, without requiring a
+recompile. If the driver load fails to enable a specific interrupt mode, the
+driver will try other interrupt modes, from least to most compatible.  The
+interrupt order is MSI-X, MSI, Legacy.  If specifying MSI (IntMode=1)
+interrupts, only MSI and Legacy will be attempted.
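+
+For example, to request MSI interrupts at load time (a sketch):
+
+     modprobe e1000e IntMode=1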
+
+CrcStripping
+------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+Strip the CRC from received packets before sending up the network stack.  If
+you have a machine with a BMC enabled but cannot receive IPMI traffic after
+loading or enabling the driver, try disabling this feature.
+
+WriteProtectNVM
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+Set the hardware to ignore all write/erase cycles to the GbE region in the
+ICHx NVM (non-volatile memory).  This feature can be disabled via the
+WriteProtectNVM module parameter (it is enabled by default) only after
+a hardware reset; the machine must be power cycled before trying to
+enable writes.
+
+Note: the kernel boot option iomem=relaxed may need to be set if the kernel
+config option CONFIG_STRICT_DEVMEM=y and the root user wants to write the
+NVM from user space via ethtool.
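+
+For example, to load the driver with NVM write protection disabled
+(a sketch; note the hardware reset and power-cycle caveats above):
+
+     modprobe e1000e WriteProtectNVM=0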
+
+Additional Configurations
+=========================
+
+  Jumbo Frames
+  ------------
+  Jumbo Frames support is enabled by changing the MTU to a value larger than
+  the default of 1500.  Use the ifconfig command to increase the MTU size.
+  For example:
+
+       ifconfig eth<x> mtu 9000 up
+
+  This setting is not saved across reboots.
+
+  Notes:
+
+  - The maximum MTU setting for Jumbo Frames is 9216.  This value coincides
+    with the maximum Jumbo Frames size of 9234 bytes.
+
+  - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
+    poor performance or loss of link.
+
+  - Some adapters limit Jumbo Frame sized packets to a maximum of
+    4096 bytes, and some adapters do not support Jumbo Frames at all.
+
+
+  Ethtool
+  -------
+  The driver utilizes the ethtool interface for driver configuration and
+  diagnostics, as well as displaying statistical information.  We
+  strongly recommend downloading the latest version of Ethtool at:
+
+  http://sourceforge.net/projects/gkernel.
+
+  Speed and Duplex
+  ----------------
+  Speed and Duplex are configured through the Ethtool* utility. For
+  instructions, refer to the Ethtool man page.
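+
+  For example, to force 100 Mbps full duplex on a hypothetical eth0:
+
+       ethtool -s eth0 speed 100 duplex full autoneg off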
+
+  Enabling Wake on LAN* (WoL)
+  ---------------------------
+  WoL is configured through the Ethtool* utility. For instructions on
+  enabling WoL with Ethtool, refer to the Ethtool man page.
+
+  WoL will be enabled on the system during the next shut down or reboot.
+  For this driver version, in order to enable WoL, the e1000e driver must be
+  loaded when shutting down or rebooting the system.
+
+  In most cases Wake On LAN is only supported on port A for multiple port
+  adapters. To verify if a port supports Wake on LAN run ethtool eth<X>.
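+
+  For example, to check WoL support and then enable magic-packet
+  wake-up on a hypothetical eth0:
+
+       ethtool eth0
+       ethtool -s eth0 wol g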
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+    www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+    http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
old mode 100755 (executable)
new mode 100644 (file)
index 19015de..21dd5d1
@@ -1,19 +1,16 @@
 Linux* Base Driver for Intel(R) Network Connection
 ==================================================
 
-November 24, 2009
+Intel Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
 
 Contents
 ========
 
-- In This Release
 - Identifying Your Adapter
 - Known Issues/Troubleshooting
 - Support
 
-In This Release
-===============
-
 This file describes the ixgbevf Linux* Base Driver for Intel Network
 Connection.
 
@@ -33,7 +30,7 @@ Identifying Your Adapter
 For more information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
 
-    http://support.intel.com/support/network/sb/CS-008441.htm
+    http://support.intel.com/support/go/network/adapter/idguide.htm
 
 Known Issues/Troubleshooting
 ============================
@@ -57,34 +54,3 @@ or the Intel Wired Networking project hosted by Sourceforge at:
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related
 to the issue to e1000-devel@lists.sf.net
-
-License
-=======
-
-Intel 10 Gigabit Linux driver.
-Copyright(c) 1999 - 2009 Intel Corporation.
-
-This program is free software; you can redistribute it and/or modify it
-under the terms and conditions of the GNU General Public License,
-version 2, as published by the Free Software Foundation.
-
-This program is distributed in the hope it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-The full GNU General Public License is included in this distribution in
-the file called "COPYING".
-
-Trademarks
-==========
-
-Intel, Itanium, and Pentium are trademarks or registered trademarks of
-Intel Corporation or its subsidiaries in the United States and other
-countries.
-
-* Other names and brands may be claimed as the property of others.
index 9363e056188ac87282c2477e25df2eebe6e837fa..8ed17587a74bdc006b2d2922b5709f0e16ec08ad 100644 (file)
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
 current limit is controllable).
 
 (C) 2008  Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
 
 
 Nomenclature
index ce46fa1e643e876344071d480e1e6d6d54b89856..37c6aad5e590ac1375944b8edf0656aa7b106607 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop       Basic Laptop config (default)
+  hp-laptop    HP laptops, e.g. G60
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
index ccd951fa94eeda42a5a50c854b6b92e512fe6724..cc96ee2666f2e5f10f13b83b1fab72bcf8e18924 100644 (file)
@@ -478,7 +478,7 @@ static void prepare_hwpoison_fd(void)
        }
 
        if (opt_unpoison && !hwpoison_forget_fd) {
-               sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+               sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
                hwpoison_forget_fd = checked_open(buf, O_WRONLY);
        }
 }
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644 (file)
index 0000000..e4498a2
--- /dev/null
@@ -0,0 +1,380 @@
+
+Concurrency Managed Workqueue (cmwq)
+
+September, 2010                Tejun Heo <tj@kernel.org>
+                       Florian Mickler <florian@mickler.org>
+
+CONTENTS
+
+1. Introduction
+2. Why cmwq?
+3. The Design
+4. Application Programming Interface (API)
+5. Example Execution Scenarios
+6. Guidelines
+
+
+1. Introduction
+
+There are many cases where an asynchronous process execution context
+is needed and the workqueue (wq) API is the most commonly used
+mechanism for such cases.
+
+When such an asynchronous execution context is needed, a work item
+describing which function to execute is put on a queue.  An
+independent thread serves as the asynchronous execution context.  The
+queue is called workqueue and the thread is called worker.
+
+While there are work items on the workqueue the worker executes the
+functions associated with the work items one after the other.  When
+there is no work item left on the workqueue the worker becomes idle.
+When a new work item gets queued, the worker begins executing again.
+
+
+2. Why cmwq?
+
+In the original wq implementation, a multi threaded (MT) wq had one
+worker thread per CPU and a single threaded (ST) wq had one worker
+thread system-wide.  A single MT wq needed to keep around the same
+number of workers as the number of CPUs.  The kernel grew a lot of MT
+wq users over the years and with the number of CPU cores continuously
+rising, some systems saturated the default 32k PID space just booting
+up.
+
+Although MT wq wasted a lot of resources, the level of concurrency
+provided was unsatisfactory.  The limitation was common to both ST and
+MT wq, albeit less severe on MT.  Each wq maintained its own separate
+worker pool.  An MT wq could provide only one execution context per CPU
+while an ST wq provided one for the whole system.  Work items had to compete for
+those very limited execution contexts leading to various problems
+including proneness to deadlocks around the single execution context.
+
+The tension between the provided level of concurrency and resource
+usage also forced its users to make unnecessary tradeoffs like libata
+choosing to use ST wq for polling PIOs and accepting an unnecessary
+limitation that no two polling PIOs can progress at the same time.  As
+MT wq don't provide much better concurrency, users that required a
+higher level of concurrency, like async or fscache, had to implement
+their own thread pool.
+
+Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
+focus on the following goals.
+
+* Maintain compatibility with the original workqueue API.
+
+* Use per-CPU unified worker pools shared by all wq to provide
+  flexible level of concurrency on demand without wasting a lot of
+  resources.
+
+* Automatically regulate worker pool and level of concurrency so that
+  the API users don't need to worry about such details.
+
+
+3. The Design
+
+In order to ease the asynchronous execution of functions a new
+abstraction, the work item, is introduced.
+
+A work item is a simple struct that holds a pointer to the function
+that is to be executed asynchronously.  Whenever a driver or subsystem
+wants a function to be executed asynchronously it has to set up a work
+item pointing to that function and queue that work item on a
+workqueue.
+
+Special purpose threads, called worker threads, execute the functions
+off of the queue, one after the other.  If no work is queued, the
+worker threads become idle.  These worker threads are managed in
+so-called thread-pools.
+
+The cmwq design differentiates between the user-facing workqueues that
+subsystems and drivers queue work items on and the backend mechanism
+which manages thread-pool and processes the queued work items.
+
+The backend is called gcwq.  There is one gcwq for each possible CPU
+and one gcwq to serve work items queued on unbound workqueues.
+
+Subsystems and drivers can create and queue work items through special
+workqueue API functions as they see fit. They can influence some
+aspects of the way the work items are executed by setting flags on the
+workqueue they are putting the work item on. These flags include
+things like CPU locality, reentrancy, concurrency limits and more. To
+get a detailed overview refer to the API description of
+alloc_workqueue() below.
+
+When a work item is queued to a workqueue, the target gcwq is
+determined according to the queue parameters and workqueue attributes
+and appended on the shared worklist of the gcwq.  For example, unless
+specifically overridden, a work item of a bound workqueue will be
+queued on the worklist of exactly the gcwq that is associated with the
+CPU the issuer is running on.
+
+For any worker pool implementation, managing the concurrency level
+(how many execution contexts are active) is an important issue.  cmwq
+tries to keep the concurrency at a minimal but sufficient level.
+Minimal to save resources and sufficient in that the system is used at
+its full capacity.
+
+Each gcwq bound to an actual CPU implements concurrency management by
+hooking into the scheduler.  The gcwq is notified whenever an active
+worker wakes up or sleeps and keeps track of the number of the
+currently runnable workers.  Generally, work items are not expected to
+hog a CPU and consume many cycles.  That means maintaining just enough
+concurrency to prevent work processing from stalling should be
+optimal.  As long as there are one or more runnable workers on the
+CPU, the gcwq doesn't start execution of a new work, but, when the
+last running worker goes to sleep, it immediately schedules a new
+worker so that the CPU doesn't sit idle while there are pending work
+items.  This allows using a minimal number of workers without losing
+execution bandwidth.
+
+Keeping idle workers around costs nothing other than the memory space
+for the kthreads, so cmwq holds onto idle ones for a while before killing
+them.
+
+For an unbound wq, the above concurrency management doesn't apply and
+the gcwq for the pseudo unbound CPU tries to start executing all work
+items as soon as possible.  The responsibility of regulating
+concurrency level is on the users.  There is also a flag to mark a
+bound wq to ignore the concurrency management.  Please refer to the
+API section for details.
+
+The forward progress guarantee relies on workers being created when
+more execution contexts are necessary, which in turn is guaranteed
+through the use of rescue workers.  All work items which might be used
+on code paths that handle memory reclaim are required to be queued on
+wq's that have a rescue-worker reserved for execution under memory
+pressure.  Otherwise the thread-pool can deadlock waiting for execution
+contexts to free up.
+
+
+4. Application Programming Interface (API)
+
+alloc_workqueue() allocates a wq.  The original create_*workqueue()
+functions are deprecated and scheduled for removal.  alloc_workqueue()
+takes three arguments - @name, @flags and @max_active.  @name is the
+name of the wq and is also used as the name of the rescuer thread if
+there is one.
+
+A wq no longer manages execution resources but serves as a domain for
+forward progress guarantee, flush and work item attributes.  @flags
+and @max_active control how work items are assigned execution
+resources, scheduled and executed.
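+
+As a hedged example continuing the earlier sketch (the wq name is
+invented), a driver whose work items must make progress under memory
+pressure might allocate its wq like this:
+
+	frob_wq = alloc_workqueue("frob", WQ_RESCUER, 0);
+	if (!frob_wq)
+		return -ENOMEM;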
+
+@flags:
+
+  WQ_NON_REENTRANT
+
+       By default, a wq guarantees non-reentrance only on the same
+       CPU.  A work item may not be executed concurrently on the same
+       CPU by multiple workers but is allowed to be executed
+       concurrently on multiple CPUs.  This flag makes sure
+       non-reentrance is enforced across all CPUs.  Work items queued
+       to a non-reentrant wq are guaranteed to be executed by at most
+       one worker system-wide at any given time.
+
+  WQ_UNBOUND
+
+       Work items queued to an unbound wq are served by a special
+       gcwq which hosts workers which are not bound to any specific
+       CPU.  This makes the wq behave as a simple execution context
+       provider without concurrency management.  The unbound gcwq
+       tries to start execution of work items as soon as possible.
+       Unbound wq sacrifices locality but is useful for the following
+       cases.
+
+       * Wide fluctuation in the concurrency level requirement is
+         expected and using a bound wq may end up creating a large
+         number of mostly unused workers across different CPUs as the
+         issuer hops through different CPUs.
+
+       * Long running CPU intensive workloads which can be better
+         managed by the system scheduler.
+
+  WQ_FREEZEABLE
+
+       A freezeable wq participates in the freeze phase of the system
+       suspend operations.  Work items on the wq are drained and no
+       new work item starts execution until thawed.
+
+  WQ_RESCUER
+
+       All wq which might be used in the memory reclaim paths _MUST_
+       have this flag set.  This reserves one worker exclusively for
+       the execution of this wq under memory pressure.
+
+  WQ_HIGHPRI
+
+       Work items of a highpri wq are queued at the head of the
+       worklist of the target gcwq and start execution regardless of
+       the current concurrency level.  In other words, highpri work
+       items will always start execution as soon as an execution
+       resource is available.
+
+       Ordering among highpri work items is preserved - a highpri
+       work item queued after another highpri work item will start
+       execution after the earlier highpri work item starts.
+
+       Although highpri work items are not held back by other
+       runnable work items, they still contribute to the concurrency
+       level.  Highpri work items in runnable state will prevent
+       non-highpri work items from starting execution.
+
+       This flag is meaningless for unbound wq.
+
+  WQ_CPU_INTENSIVE
+
+       Work items of a CPU intensive wq do not contribute to the
+       concurrency level.  In other words, runnable CPU intensive
+       work items will not prevent other work items from starting
+       execution.  This is useful for bound work items which are
+       expected to hog CPU cycles so that their execution is
+       regulated by the system scheduler.
+
+       Although CPU intensive work items don't contribute to the
+       concurrency level, the start of their execution is still
+       regulated by the concurrency management and runnable
+       non-CPU-intensive work items can delay execution of CPU
+       intensive work items.
+
+       This flag is meaningless for unbound wq.
+
+  WQ_HIGHPRI | WQ_CPU_INTENSIVE
+
+       This combination makes the wq avoid interaction with
+       concurrency management completely and behave as a simple
+       per-CPU execution context provider.  Work items queued on a
+       highpri CPU-intensive wq start execution as soon as resources
+       are available and don't affect execution of other work items.
+
+@max_active:
+
+@max_active determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq.  For example,
+with @max_active of 16, at most 16 work items of the wq can be
+executing at the same time per CPU.
+
+Currently, for a bound wq, the maximum limit for @max_active is 512
+and the default value used when 0 is specified is 256.  For an unbound
+wq, the limit is the higher of 512 and 4 * num_possible_cpus().  These
+values are chosen sufficiently high such that they are not the
+limiting factor while providing protection in runaway cases.
+
+The number of active work items of a wq is usually regulated by the
+users of the wq, more specifically, by how many work items the users
+may queue at the same time.  Unless there is a specific need for
+throttling the number of active work items, specifying '0' is
+recommended.
+
+Some users depend on the strict execution ordering of ST wq.  The
+combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
+behavior.  Work items on such a wq are always queued to the unbound
+gcwq and only one work item can be active at any given time, thus
+achieving the same ordering property as ST wq.
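+
+For example, such a strictly ordered wq can be created as follows
+(the variable and wq names are illustrative):
+
+	ordered_wq = alloc_workqueue("frob_ordered", WQ_UNBOUND, 1);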
+
+
+5. Example Execution Scenarios
+
+The following example execution scenarios try to illustrate how cmwq
+behaves under different configurations.
+
+ Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
+ w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
+ again before finishing.  w1 and w2 burn CPU for 5ms then sleep for
+ 10ms.
+
+Ignoring all other tasks, work items and processing overhead, and
+assuming simple FIFO scheduling, the following is one highly
+simplified version of possible sequences of events with the original
+wq.
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 starts and burns CPU
+ 25            w1 sleeps
+ 35            w1 wakes up and finishes
+ 35            w2 starts and burns CPU
+ 40            w2 sleeps
+ 50            w2 wakes up and finishes
+
+And with cmwq with @max_active >= 3,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 starts and burns CPU
+ 10            w1 sleeps
+ 10            w2 starts and burns CPU
+ 15            w2 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 25            w2 wakes up and finishes
+
+If @max_active == 2,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 starts and burns CPU
+ 10            w1 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 20            w2 starts and burns CPU
+ 25            w2 sleeps
+ 35            w2 wakes up and finishes
+
+Now, let's assume w1 and w2 are queued to a different wq q1 which has
+WQ_HIGHPRI set,
+
+ TIME IN MSECS EVENT
+ 0             w1 and w2 start and burn CPU
+ 5             w1 sleeps
+ 10            w2 sleeps
+ 10            w0 starts and burns CPU
+ 15            w0 sleeps
+ 15            w1 wakes up and finishes
+ 20            w2 wakes up and finishes
+ 25            w0 wakes up and burns CPU
+ 30            w0 finishes
+
+If q1 has WQ_CPU_INTENSIVE set,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 and w2 start and burn CPU
+ 10            w1 sleeps
+ 15            w2 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 25            w2 wakes up and finishes
+
+
+6. Guidelines
+
+* Do not forget to use WQ_RESCUER if a wq may process work items which
+  are used during memory reclaim.  Each wq with WQ_RESCUER set has one
+  rescuer thread reserved for it.  If there are dependencies among
+  multiple work items used during memory reclaim, they should be
+  queued to separate wq's, each with WQ_RESCUER.
+
+* Unless strict ordering is required, there is no need to use ST wq.
+
+* Unless there is a specific need, using 0 for @max_active is
+  recommended.  In most use cases the concurrency level stays well
+  under the default limit.
+
+* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
+  flush and work item attributes.  Work items which are not involved
+  in memory reclaim, don't need to be flushed as a part of a group of
+  work items, and don't require any special attribute can use one of
+  the system wq's (see the sketch after this list).  There is no
+  difference in execution characteristics between using a dedicated wq
+  and a system wq.
+
+* Unless work items are expected to consume a huge amount of CPU
+  cycles, using a bound wq is usually beneficial due to the increased
+  level of locality in wq operations and work item execution.
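+
+A minimal sketch of the system wq guideline above (the work item and
+function names are invented for illustration):
+
+	static void frob_fn(struct work_struct *work);
+	static DECLARE_WORK(frob_sys_work, frob_fn);
+
+	/* no special attributes needed - the default system wq will do */
+	schedule_work(&frob_sys_work);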
index 087912aa09bda419450654f904c96289dcb1b43c..3d4179fbc5263ff4b624753c9ce06004286ce246 100644 (file)
@@ -962,6 +962,23 @@ W: http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:     Kukjin Kim <kgene.kim@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5p*/
+
+ARM/SAMSUNG S5P SERIES FIMC SUPPORT
+M:     Kyungmin Park <kyungmin.park@samsung.com>
+M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org
+L:     linux-media@vger.kernel.org
+S:     Maintained
+F:     arch/arm/plat-s5p/dev-fimc*
+F:     arch/arm/plat-samsung/include/plat/*fimc*
+F:     drivers/media/video/s5p-fimc/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:     Paul Mundt <lethal@linux-sh.org>
 M:     Magnus Damm <magnus.damm@gmail.com>
@@ -1135,7 +1152,7 @@ ATLX ETHERNET DRIVERS
 M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
 M:     Jie Yang <jie.yang@atheros.com>
-L:     atl1-devel@lists.sourceforge.net
+L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/atl1
 W:     http://atl1.sourceforge.net
 S:     Maintained
@@ -1220,7 +1237,7 @@ F:        drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
@@ -1228,7 +1245,7 @@ S:        Supported
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 S:     Supported
 F:     arch/avr32/mach-at32ap/
 
@@ -1445,6 +1462,16 @@ S:       Maintained
 F:     Documentation/video4linux/cafe_ccic
 F:     drivers/media/video/cafe_ccic*
 
+CAIF NETWORK LAYER
+M:     Sjur Braendeland <sjur.brandeland@stericsson.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/networking/caif/
+F:     drivers/net/caif/
+F:     include/linux/caif/
+F:     include/net/caif/
+F:     net/caif/
+
 CALGARY x86-64 IOMMU
 M:     Muli Ben-Yehuda <muli@il.ibm.com>
 M:     "Jon D. Mason" <jdmason@kudzu.us>
@@ -1500,6 +1527,8 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 S:     Supported
 F:     Documentation/filesystems/ceph.txt
 F:     fs/ceph
+F:     net/ceph
+F:     include/linux/ceph
 
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 M:     David Vrabel <david.vrabel@csr.com>
@@ -2189,6 +2218,12 @@ W:       http://acpi4asus.sf.net
 S:     Maintained
 F:     drivers/platform/x86/eeepc-laptop.c
 
+EFIFB FRAMEBUFFER DRIVER
+L:     linux-fbdev@vger.kernel.org
+M:     Peter Jones <pjones@redhat.com>
+S:     Maintained
+F:     drivers/video/efifb.c
+
 EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
@@ -2512,7 +2547,7 @@ S:        Supported
 F:     drivers/scsi/gdt*
 
 GENERIC GPIO I2C DRIVER
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
 S:     Supported
 F:     drivers/i2c/busses/i2c-gpio.c
 F:     include/linux/i2c-gpio.h
@@ -2647,9 +2682,14 @@ S:       Maintained
 F:     drivers/media/video/gspca/
 
 HARDWARE MONITORING
+M:     Jean Delvare <khali@linux-fr.org>
+M:     Guenter Roeck <guenter.roeck@ericsson.com>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
-S:     Orphan
+T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+S:     Maintained
 F:     Documentation/hwmon/
 F:     drivers/hwmon/
 F:     include/linux/hwmon*.h
@@ -2787,11 +2827,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3040,16 +3075,27 @@ L:      netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ixp2000/
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
 M:     Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:     Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:     Bruce Allan <bruce.w.allan@intel.com>
-M:     Alex Duyck <alexander.h.duyck@intel.com>
+M:     Carolyn Wyborny <carolyn.wyborny@intel.com>
+M:     Don Skidmore <donald.c.skidmore@intel.com>
+M:     Greg Rose <gregory.v.rose@intel.com>
 M:     PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
+M:     Alex Duyck <alexander.h.duyck@intel.com>
 M:     John Ronciak <john.ronciak@intel.com>
 L:     e1000-devel@lists.sourceforge.net
 W:     http://e1000.sourceforge.net/
 S:     Supported
+F:     Documentation/networking/e100.txt
+F:     Documentation/networking/e1000.txt
+F:     Documentation/networking/e1000e.txt
+F:     Documentation/networking/igb.txt
+F:     Documentation/networking/igbvf.txt
+F:     Documentation/networking/ixgb.txt
+F:     Documentation/networking/ixgbe.txt
+F:     Documentation/networking/ixgbevf.txt
 F:     drivers/net/e100.c
 F:     drivers/net/e1000/
 F:     drivers/net/e1000e/
@@ -3057,6 +3103,7 @@ F:        drivers/net/igb/
 F:     drivers/net/igbvf/
 F:     drivers/net/ixgb/
 F:     drivers/net/ixgbe/
+F:     drivers/net/ixgbevf/
 
 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
 L:     linux-wireless@vger.kernel.org
@@ -3117,7 +3164,7 @@ F:        drivers/net/ioc3-eth.c
 
 IOC3 SERIAL DRIVER
 M:     Pat Gefre <pfg@sgi.com>
-L:     linux-mips@linux-mips.org
+L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/serial/ioc3_serial.c
 
@@ -3404,7 +3451,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -3765,9 +3812,8 @@ W:        http://www.syskonnect.com
 S:     Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
@@ -3891,10 +3937,8 @@ F:       Documentation/serial/moxa-smartio
 F:     drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:     Lennart Poettering <mzxreary@0pointer.de>
+M:     Lee, Chun-Yi <jlee@novell.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     https://tango.0pointer.de/mailman/listinfo/s270-linux
-W:     http://0pointer.de/lennart/tchibo.html
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
 
@@ -3911,8 +3955,10 @@ S:       Supported
 F:     drivers/mfd/
 
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/
 F:     include/linux/mmc/
 
@@ -3934,7 +3980,7 @@ F:        drivers/char/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:     Felipe Balbi <felipe.balbi@nokia.com>
+M:     Felipe Balbi <balbi@ti.com>
 L:     linux-usb@vger.kernel.org
 T:     git git://gitorious.org/usb/usb.git
 S:     Maintained
@@ -3954,8 +4000,8 @@ S:        Maintained
 F:     drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
-S:     Maintained
+M:     Petr Vandrovec <petr@vandrovec.name>
+S:     Odd Fixes
 F:     fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -4232,7 +4278,7 @@ S:        Maintained
 F:     drivers/char/hw_random/omap-rng.c
 
 OMAP USB SUPPORT
-M:     Felipe Balbi <felipe.balbi@nokia.com>
+M:     Felipe Balbi <balbi@ti.com>
 M:     David Brownell <dbrownell@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
@@ -4761,6 +4807,15 @@ F:       fs/qnx4/
 F:     include/linux/qnx4_fs.h
 F:     include/linux/qnxtypes.h
 
+RADOS BLOCK DEVICE (RBD)
+M:     Yehuda Sadeh <yehuda@hq.newdream.net>
+M:     Sage Weil <sage@newdream.net>
+L:     ceph-devel@vger.kernel.org
+S:     Supported
+F:     drivers/block/rbd.c
+F:     drivers/block/rbd_types.h
+
 RADEON FRAMEBUFFER DISPLAY DRIVER
 M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
 L:     linux-fbdev@vger.kernel.org
@@ -4810,6 +4865,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4834,6 +4890,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4841,12 +4898,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -4986,6 +5041,12 @@ F:       drivers/media/common/saa7146*
 F:     drivers/media/video/*7146*
 F:     include/media/*7146*
 
+SAMSUNG AUDIO (ASoC) DRIVERS
+M:     Jassi Brar <jassi.brar@samsung.com>
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:     Supported
+F:     sound/soc/s3c24xx
+
 TLG2300 VIDEO4LINUX-2 DRIVER
 M:     Huang Shijie <shijie8@gmail.com>
 M:     Kang Yong <kangyong@telegent.com>
@@ -5083,8 +5144,10 @@ S:       Maintained
 F:     drivers/mmc/host/sdricoh_cs.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
@@ -6426,8 +6489,10 @@ F:       include/linux/wm97xx.h
 WOLFSON MICROELECTRONICS DRIVERS
 M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
 M:     Ian Lartey <ian@opensource.wolfsonmicro.com>
+M:     Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+T:     git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:     git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W:     http://opensource.wolfsonmicro.com/node/8
+W:     http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
 S:     Supported
 F:     Documentation/hwmon/wm83??
 F:     drivers/leds/leds-wm83*.c
index 4df9873f83b275a48037e86e058208fa9c6baffc..d3c10719bbbd5b311c51f9a407184fd30847e6ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc3
-NAME = Sheep on Meth
+EXTRAVERSION =
+NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -568,6 +568,12 @@ endif
 
 ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS  += -pg
+ifdef CONFIG_DYNAMIC_FTRACE
+       ifdef CONFIG_HAVE_C_RECORDMCOUNT
+               BUILD_C_RECORDMCOUNT := y
+               export BUILD_C_RECORDMCOUNT
+       endif
+endif
 endif
 
 # We trigger additional mismatches with less inlining
@@ -591,6 +597,11 @@ KBUILD_CFLAGS      += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 # But warn user when we do so
 warn-assign = \
index 4877a8c8ee1697599289f35107824f95ba8daf84..53d7f619a1b9b46643c59cd3c7d4ace354148c20 100644 (file)
@@ -32,8 +32,9 @@ config HAVE_OPROFILE
 
 config KPROBES
        bool "Kprobes"
-       depends on KALLSYMS && MODULES
+       depends on MODULES
        depends on HAVE_KPROBES
+       select KALLSYMS
        help
          Kprobes allows you to trap at almost any kernel address and
          execute a callback function.  register_kprobe() establishes
@@ -45,7 +46,6 @@ config OPTPROBES
        def_bool y
        depends on KPROBES && HAVE_OPTPROBES
        depends on !PREEMPT
-       select KALLSYMS_ALL
 
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
        bool
@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
          subsystem.  Also has support for calculating CPU cycle events
          to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+       bool
+
 source "kernel/gcov/Kconfig"
index b9647bb66d1388d9c13503748252c2a1df4ac830..d04ccd73af45f6487f442a0dba26921db884e218 100644 (file)
@@ -9,6 +9,7 @@ config ALPHA
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_SYSCALL_WRAPPERS
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_DMA_ATTRS
        help
index 01d71e1c8a9eb6df57fe8bc13e1f94f634a8eaf7..012f1243b1c1a0024af9cffe0afdfc204f4dcf11 100644 (file)
@@ -43,6 +43,8 @@ extern void smp_imb(void);
 /* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */
 
 #ifndef CONFIG_SMP
+#include <linux/sched.h>
+
 extern void __load_new_mm_context(struct mm_struct *);
 static inline void
 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
index 4157cd3c44a96d2811410b334a35f11923ba6fbf..fe792ca818f64c4d9954e8b62e5f8064a5d5777e 100644 (file)
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
index 804e5311c84188fbc14a62b7afb97bc35a232003..058937bf5a77718983126f2ab3813a023cb8c1d0 100644 (file)
 #define __NR_pwritev                   491
 #define __NR_rt_tgsigqueueinfo         492
 #define __NR_perf_event_open           493
+#define __NR_fanotify_init             494
+#define __NR_fanotify_mark             495
+#define __NR_prlimit64                 496
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS                    494
+#define NR_SYSCALLS                    497
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 /* "Conditional" syscalls.  What we want is
 
index b45d913a51c368881b4311d1f1887566829b09a8..6d159cee5f2f43a48bc9aac8355069a3e588ef7f 100644 (file)
@@ -73,8 +73,6 @@
        ldq     $20, HAE_REG($19);      \
        stq     $21, HAE_CACHE($19);    \
        stq     $21, 0($20);            \
-       ldq     $0, 0($sp);             \
-       ldq     $1, 8($sp);             \
 99:;                                   \
        ldq     $19, 72($sp);           \
        ldq     $20, 80($sp);           \
@@ -316,19 +314,24 @@ ret_from_sys_call:
        cmovne  $26, 0, $19             /* $19 = 0 => non-restartable */
        ldq     $0, SP_OFF($sp)
        and     $0, 8, $0
-       beq     $0, restore_all
-ret_from_reschedule:
+       beq     $0, ret_to_kernel
+ret_to_user:
        /* Make sure need_resched and sigpending don't change between
                sampling and the rti.  */
        lda     $16, 7
        call_pal PAL_swpipl
        ldl     $5, TI_FLAGS($8)
        and     $5, _TIF_WORK_MASK, $2
-       bne     $5, work_pending
+       bne     $2, work_pending
 restore_all:
        RESTORE_ALL
        call_pal PAL_rti
 
+ret_to_kernel:
+       lda     $16, 7
+       call_pal PAL_swpipl
+       br restore_all
+
        .align 3
 $syscall_error:
        /*
@@ -363,7 +366,7 @@ $ret_success:
  *       $8: current.
  *      $19: The old syscall number, or zero if this is not a return
  *           from a syscall that errored and is possibly restartable.
- *      $20: Error indication.
+ *      $20: The old a3 value
  */
 
        .align  4
@@ -392,12 +395,18 @@ $work_resched:
 
 $work_notifysig:
        mov     $sp, $16
-       b     $1, do_switch_stack
+       bsr     $1, do_switch_stack
        mov     $sp, $17
        mov     $5, $18
+       mov     $19, $9         /* save old syscall number */
+       mov     $20, $10        /* save old a3 */
+       and     $5, _TIF_SIGPENDING, $2
+       cmovne  $2, 0, $9       /* we don't want double syscall restarts */
        jsr     $26, do_notify_resume
+       mov     $9, $19
+       mov     $10, $20
        bsr     $1, undo_switch_stack
-       br      restore_all
+       br      ret_to_user
 .end work_pending
 
 /*
@@ -430,6 +439,7 @@ strace:
        beq     $1, 1f
        ldq     $27, 0($2)
 1:     jsr     $26, ($27), sys_gettimeofday
+ret_from_straced:
        ldgp    $gp, 0($26)
 
        /* check return.. */
@@ -650,7 +660,7 @@ kernel_thread:
        /* We don't actually care for a3 success widgetry in the kernel.
           Not for positive errno values.  */
        stq     $0, 0($sp)              /* $0 */
-       br      restore_all
+       br      ret_to_kernel
 .end kernel_thread
 
 /*
@@ -757,11 +767,15 @@ sys_vfork:
        .ent    sys_sigreturn
 sys_sigreturn:
        .prologue 0
+       lda     $9, ret_from_straced
+       cmpult  $26, $9, $9
        mov     $sp, $17
        lda     $18, -SWITCH_STACK_SIZE($sp)
        lda     $sp, -SWITCH_STACK_SIZE($sp)
        jsr     $26, do_sigreturn
-       br      $1, undo_switch_stack
+       bne     $9, 1f
+       jsr     $26, syscall_trace
+1:     br      $1, undo_switch_stack
        br      ret_from_sys_call
 .end sys_sigreturn
 
@@ -770,46 +784,18 @@ sys_sigreturn:
        .ent    sys_rt_sigreturn
 sys_rt_sigreturn:
        .prologue 0
+       lda     $9, ret_from_straced
+       cmpult  $26, $9, $9
        mov     $sp, $17
        lda     $18, -SWITCH_STACK_SIZE($sp)
        lda     $sp, -SWITCH_STACK_SIZE($sp)
        jsr     $26, do_rt_sigreturn
-       br      $1, undo_switch_stack
+       bne     $9, 1f
+       jsr     $26, syscall_trace
+1:     br      $1, undo_switch_stack
        br      ret_from_sys_call
 .end sys_rt_sigreturn
 
-       .align  4
-       .globl  sys_sigsuspend
-       .ent    sys_sigsuspend
-sys_sigsuspend:
-       .prologue 0
-       mov     $sp, $17
-       br      $1, do_switch_stack
-       mov     $sp, $18
-       subq    $sp, 16, $sp
-       stq     $26, 0($sp)
-       jsr     $26, do_sigsuspend
-       ldq     $26, 0($sp)
-       lda     $sp, SWITCH_STACK_SIZE+16($sp)
-       ret
-.end sys_sigsuspend
-
-       .align  4
-       .globl  sys_rt_sigsuspend
-       .ent    sys_rt_sigsuspend
-sys_rt_sigsuspend:
-       .prologue 0
-       mov     $sp, $18
-       br      $1, do_switch_stack
-       mov     $sp, $19
-       subq    $sp, 16, $sp
-       stq     $26, 0($sp)
-       jsr     $26, do_rt_sigsuspend
-       ldq     $26, 0($sp)
-       lda     $sp, SWITCH_STACK_SIZE+16($sp)
-       ret
-.end sys_rt_sigsuspend
-
        .align  4
        .globl  sys_sethae
        .ent    sys_sethae
@@ -928,15 +914,6 @@ sys_execve:
        jmp     $31, do_sys_execve
 .end sys_execve
 
-       .align  4
-       .globl  osf_sigprocmask
-       .ent    osf_sigprocmask
-osf_sigprocmask:
-       .prologue 0
-       mov     $sp, $18
-       jmp     $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
        .align  4
        .globl  alpha_ni_syscall
        .ent    alpha_ni_syscall
index 8ca6345bf13167842d3c0fb60637e51e18e3db93..253cf1a87481e815ad9a724dde1fef51b5616d09 100644 (file)
@@ -90,11 +90,13 @@ static int
 ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, 
               u64 c_stat, u64 c_sts, int print)
 {
-       char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
-                              "MEMORY", "BCACHE", "DCACHE", 
-                              "BCACHE PROBE", "BCACHE PROBE" };
-       char *streamname[] = { "D", "I" };
-       char *bitsname[] = { "SINGLE", "DOUBLE" };
+       static const char * const sourcename[] = {
+               "UNKNOWN", "UNKNOWN", "UNKNOWN",
+               "MEMORY", "BCACHE", "DCACHE",
+               "BCACHE PROBE", "BCACHE PROBE"
+       };
+       static const char * const streamname[] = { "D", "I" };
+       static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
        int status = MCHK_DISPOSITION_REPORT;
        int source = -1, stream = -1, bits = -1;
 
index 5c905aaaeccd82861ea62d9186517475f0d7019c..648ae88aeb8ae0cce7dcedbe215bf56417e5eebc 100644 (file)
@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 static void
 marvel_print_pox_trans_sum(u64 trans_sum)
 {
-       char *pcix_cmd[] = { "Interrupt Acknowledge",
-                            "Special Cycle",
-                            "I/O Read",
-                            "I/O Write",
-                            "Reserved",
-                            "Reserved / Device ID Message",
-                            "Memory Read",
-                            "Memory Write",
-                            "Reserved / Alias to Memory Read Block",
-                            "Reserved / Alias to Memory Write Block",
-                            "Configuration Read",
-                            "Configuration Write",
-                            "Memory Read Multiple / Split Completion",
-                            "Dual Address Cycle",
-                            "Memory Read Line / Memory Read Block",
-                            "Memory Write and Invalidate / Memory Write Block"
+       static const char * const pcix_cmd[] = {
+               "Interrupt Acknowledge",
+               "Special Cycle",
+               "I/O Read",
+               "I/O Write",
+               "Reserved",
+               "Reserved / Device ID Message",
+               "Memory Read",
+               "Memory Write",
+               "Reserved / Alias to Memory Read Block",
+               "Reserved / Alias to Memory Write Block",
+               "Configuration Read",
+               "Configuration Write",
+               "Memory Read Multiple / Split Completion",
+               "Dual Address Cycle",
+               "Memory Read Line / Memory Read Block",
+               "Memory Write and Invalidate / Memory Write Block"
        };
 
 #define IO7__POX_TRANSUM__PCI_ADDR__S          (0)
index f7ed97ce0dfd72458dde026292b89702be518477..c3b3781a03de01045ebd270319173ae2b5d347ee 100644 (file)
@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
        int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-       char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
-       char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+       static const char * const serror_src[] = {
+               "GPCI", "APCI", "AGP HP", "AGP LP"
+       };
+       static const char * const serror_cmd[] = {
+               "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
+       };
 #endif /* CONFIG_VERBOSE_MCHECK */
 
 #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
        int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-       char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
-                              "I/O Read",              "I/O Write",
-                              "Reserved",              "Reserved",
-                              "Memory Read",           "Memory Write",
-                              "Reserved",              "Reserved",
-                              "Configuration Read",    "Configuration Write",
-                              "Memory Read Multiple",  "Dual Address Cycle",
-                              "Memory Read Line","Memory Write and Invalidate"
+       static const char * const perror_cmd[] = {
+               "Interrupt Acknowledge", "Special Cycle",
+               "I/O Read",             "I/O Write",
+               "Reserved",             "Reserved",
+               "Memory Read",          "Memory Write",
+               "Reserved",             "Reserved",
+               "Configuration Read",   "Configuration Write",
+               "Memory Read Multiple", "Dual Address Cycle",
+               "Memory Read Line",     "Memory Write and Invalidate"
        };
 #endif /* CONFIG_VERBOSE_MCHECK */
 
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
        int cmd, len;
        unsigned long addr;
 
-       char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
-                                "Write (low-priority)",
-                                "Write (high-priority)",
-                                "Reserved",            "Reserved",
-                                "Flush",               "Fence"
+       static const char * const agperror_cmd[] = {
+               "Read (low-priority)",  "Read (high-priority)",
+               "Write (low-priority)", "Write (high-priority)",
+               "Reserved",             "Reserved",
+               "Flush",                "Fence"
        };
 #endif /* CONFIG_VERBOSE_MCHECK */
 
index 5d1e6d6ce6843b136fb7810e74c86df16d86daf4..547e8b84b2f794ab546aca3b1927c80bdc1104bf 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
 {
        struct mm_struct *mm;
 
-       lock_kernel();
        mm = current->mm;
        mm->end_code = bss_start + bss_len;
        mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
        printk("set_program_attributes(%lx %lx %lx %lx)\n",
                text_start, text_len, bss_start, bss_len);
 #endif
-       unlock_kernel();
        return 0;
 }
 
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
        long error;
        int __user *min_buf_size_ptr;
 
-       lock_kernel();
        switch (code) {
        case PL_SET:
                if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
                error = -EOPNOTSUPP;
                break;
        };
-       unlock_kernel();
        return error;
 }
 
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
 
 SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 {
-       char *sysinfo_table[] = {
+       const char *sysinfo_table[] = {
                utsname()->sysname,
                utsname()->nodename,
                utsname()->release,
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
                "dummy",        /* secure RPC domain */
        };
        unsigned long offset;
-       char *res;
+       const char *res;
        long len, err = -EINVAL;
 
        offset = command-1;
index 738fc824e2ea373f85b3ae04fb7eb1346ecacdac..b899e95f79fdb424ea9b07d31153edb0e824ac8e 100644 (file)
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
 {
        struct pci_dev *pdev = to_pci_dev(container_of(kobj,
                                                       struct device, kobj));
-       struct resource *res = (struct resource *)attr->private;
+       struct resource *res = attr->private;
        enum pci_mmap_state mmap_type;
        struct pci_bus_region bar;
        int i;
index 85d8e4f58c83ce612269162b9635bd49059c39dd..1cc49683fb69b2a5f96639e71a2f1af821479e77 100644 (file)
@@ -307,7 +307,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+       delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
        /* It is possible on very rare occasions that the PMC has overflowed
         * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
                struct hw_perf_event *hwc = &pe->hw;
                int idx = hwc->idx;
 
-               if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-                       continue;
+               if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+                       alpha_perf_event_set_period(pe, hwc, idx);
+                       cpuc->current_idx[j] = idx;
                }
 
-               alpha_perf_event_set_period(pe, hwc, idx);
-               cpuc->current_idx[j] = idx;
-               cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+               if (!(hwc->state & PERF_HES_STOPPED))
+                       cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
        }
        cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
        int n0;
        int ret;
-       unsigned long flags;
+       unsigned long irq_flags;
 
        /*
         * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
         * nevertheless we disable the PMCs first to enable a potential
         * final PMI to occur before we disable interrupts.
         */
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        /* Default to error to be returned */
        ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       hwc->state = PERF_HES_UPTODATE;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_STOPPED;
+
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 
        return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       unsigned long flags;
+       unsigned long irq_flags;
        int j;
 
-       perf_disable();
-       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+       local_irq_save(irq_flags);
 
        for (j = 0; j < cpuc->n_events; j++) {
                if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
                }
        }
 
-       local_irq_restore(flags);
-       perf_enable();
+       local_irq_restore(irq_flags);
+       perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               cpuc->idx_mask &= ~(1UL<<hwc->idx);
+               hwc->state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               alpha_perf_event_update(event, hwc, hwc->idx, 0);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+               alpha_perf_event_set_period(event, hwc, hwc->idx);
+       }
+
+       hwc->state = 0;
+
        cpuc->idx_mask |= 1UL<<hwc->idx;
-       wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+       if (cpuc->enabled)
+               wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = alpha_pmu_enable,
-       .disable        = alpha_pmu_disable,
-       .read           = alpha_pmu_read,
-       .unthrottle     = alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
        int err;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!alpha_pmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        /* Do the real initialisation work. */
        err = __hw_perf_event_init(event);
 
-       if (err)
-               return ERR_PTR(err);
-
-       return &pmu;
+       return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = alpha_pmu_enable,
+       .pmu_disable    = alpha_pmu_disable,
+       .event_init     = alpha_pmu_event_init,
+       .add            = alpha_pmu_add,
+       .del            = alpha_pmu_del,
+       .start          = alpha_pmu_start,
+       .stop           = alpha_pmu_stop,
+       .read           = alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
        /* la_ptr is the counter that overflowed. */
-       if (unlikely(la_ptr >= perf_max_events)) {
+       if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
                /* This should never occur! */
                irq_err_count++;
                pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
                        /* Interrupts coming too quickly; "throttle" the
                         * counter, i.e., disable it for a little while.
                         */
-                       cpuc->idx_mask &= ~(1UL<<idx);
+                       alpha_pmu_stop(event, 0);
                }
        }
        wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
        /* And set up PMU specification */
        alpha_pmu = &ev67_pmu;
-       perf_max_events = alpha_pmu->num_pmcs;
+
+       perf_pmu_register(&pmu);
 }
 
index 842dba308eab3065510857e50312496c674c1097..3ec35066f1dc51c09367c32bea106453a109953d 100644 (file)
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
-       dest[30] = rdusp();
+       dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;
 
        /* Once upon a time this was the PS value.  Which is stupid
index 0932dbb1ef8eff444943645e15b0802b1d4cf5d5..6f7feb5db27193f33e24e962e4d1be257d8464ef 100644 (file)
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-               struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-       unsigned long oldmask = -EINVAL;
-
-       if ((unsigned long)how-1 <= 2) {
-               long sign = how-2;              /* -1 .. 1 */
-               unsigned long block, unblock;
-
-               newmask &= _BLOCKABLE;
-               spin_lock_irq(&current->sighand->siglock);
-               oldmask = current->blocked.sig[0];
-
-               unblock = oldmask & ~newmask;
-               block = oldmask | newmask;
-               if (!sign)
-                       block = unblock;
-               if (sign <= 0)
-                       newmask = block;
-               if (_NSIG_WORDS > 1 && sign > 0)
-                       sigemptyset(&current->blocked);
-               current->blocked.sig[0] = newmask;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-
-               regs->r0 = 0;           /* special no error return */
+       sigset_t oldmask;
+       sigset_t mask;
+       unsigned long res;
+
+       siginitset(&mask, newmask & _BLOCKABLE);
+       res = sigprocmask(how, &mask, &oldmask);
+       if (!res) {
+               force_successful_syscall_return();
+               res = oldmask.sig[0];
        }
-       return oldmask;
+       return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
                new_ka.ka_restorer = NULL;
        }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -144,8 +118,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
 {
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       /* Indicate EINTR on return from any possible signal handler,
-          which will not come back through here, but via sigreturn.  */
-       regs->r0 = EINTR;
-       regs->r19 = 1;
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
-                struct pt_regs *regs, struct switch_stack *sw)
-{
-       sigset_t set;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-       if (copy_from_user(&set, uset, sizeof(set)))
-               return -EFAULT;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       /* Indicate EINTR on return from any possible signal handler,
-          which will not come back through here, but via sigreturn.  */
-       regs->r0 = EINTR;
-       regs->r19 = 1;
-
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        unsigned long usp;
        long i, err = __get_user(regs->pc, &sc->sc_pc);
 
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        sw->r26 = (unsigned long) ret_from_sys_call;
 
        err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
                regs->pc -= 4;
                break;
        case ERESTART_RESTARTBLOCK:
-               current_thread_info()->restart_block.fn = do_no_restart_syscall;
                regs->r0 = EINTR;
                break;
        }
index 4afc1a1e2e5a055ccda3d11a7d2f1b96c3670235..f0df3fbd84025eff9bf304fd179bed8785e865dc 100644 (file)
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
        srm_env_t       *entry;
        char            *page;
 
-       entry = (srm_env_t *)m->private;
+       entry = m->private;
        page = (char *)__get_free_page(GFP_USER);
        if (!page)
                return -ENOMEM;
index 09acb786e72b0d665d3aa9e38346148adbdb08c5..a6a1de9db16fd6e57d96c4e0b1f76db70f27147f 100644 (file)
@@ -58,7 +58,7 @@ sys_call_table:
        .quad sys_open                          /* 45 */
        .quad alpha_ni_syscall
        .quad sys_getxgid
-       .quad osf_sigprocmask
+       .quad sys_osf_sigprocmask
        .quad alpha_ni_syscall
        .quad alpha_ni_syscall                  /* 50 */
        .quad sys_acct
@@ -512,6 +512,9 @@ sys_call_table:
        .quad sys_pwritev
        .quad sys_rt_tgsigqueueinfo
        .quad sys_perf_event_open
+       .quad sys_fanotify_init
+       .quad sys_fanotify_mark                         /* 495 */
+       .quad sys_prlimit64
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index eacceb26d9c8aa8bd250b409e821c277a9b51c73..0f1d8493cfca9c498a5187b97ae7c9dff8195b71 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-       set_perf_event_pending_flag();
+       set_irq_work_pending_flag();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()      0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()      0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
        write_sequnlock(&xtime_lock);
 
+       if (test_irq_work_pending()) {
+               clear_irq_work_pending();
+               irq_work_run();
+       }
+
 #ifndef CONFIG_SMP
        while (nticks--)
                update_process_times(user_mode(get_irq_regs()));
 #endif
 
-       if (test_perf_event_pending()) {
-               clear_perf_event_pending();
-               perf_event_do_pending();
-       }
-
        return IRQ_HANDLED;
 }
 
index b14f015008ada5e90d580b0589d4ff5144a1602e..0414e021a91c3ba8756674dea873f0dabd6c0abd 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/sched.h>
 #include <linux/tty.h>
 #include <linux/delay.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
                return;
        }
 
-       lock_kernel();
        printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
                pc, va, opcode, reg);
        do_exit(SIGSEGV);
@@ -646,7 +644,6 @@ got_exception:
         * Yikes!  No one to forward the exception to.
         * Since the registers are in a weird format, dump them ourselves.
         */
-       lock_kernel();
 
        printk("%s(%d): unhandled unaligned exception\n",
               current->comm, task_pid_nr(current));
index 16bc8eb4901c9335b32a774082e9bc1cc77bb8b9..9103904b3daba63e73815bb2980923bdc3cb045d 100644 (file)
@@ -23,6 +23,7 @@ config ARM
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_LZMA
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_REGS_AND_STACK_ACCESS_API
@@ -271,7 +272,6 @@ config ARCH_AT91
        bool "Atmel AT91"
        select ARCH_REQUIRE_GPIOLIB
        select HAVE_CLK
-       select ARCH_USES_GETTIMEOFFSET
        help
          This enables support for systems based on the Atmel AT91RM9200,
          AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1051,32 @@ config ARM_ERRATA_460075
          ACTLR register. Note that setting specific bits in the ACTLR register
          may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+       bool "ARM errata: DMB operation may be faulty"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742230 Cortex-A9
+         (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+         between two write operations may not ensure the correct visibility
+         ordering of the two writes. This workaround sets a specific bit in
+         the diagnostic register of the Cortex-A9 which causes the DMB
+         instruction to behave as a DSB, ensuring the correct behaviour of
+         the two writes.
+
+config ARM_ERRATA_742231
+       bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742231 Cortex-A9
+         (r2p0..r2p2) erratum. Under certain conditions specific to the
+         Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode
+         and accessing data in the same cache line may read corrupted data,
+         because the address hazard is mishandled when the line is replaced
+         on one CPU at the same time as another CPU is accessing it. This
+         workaround sets specific bits in the diagnostic register of the
+         Cortex-A9 which reduce the linefill issuing capabilities of the
+         processor.
+
 config PL310_ERRATA_588369
        bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1076,6 +1102,20 @@ config ARM_ERRATA_720789
          invalidated are not, resulting in an incoherency in the system page
          tables. The workaround changes the TLB flushing routines to invalidate
          entries regardless of the ASID.
+
+config ARM_ERRATA_743622
+       bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+       depends on CPU_V7
+       help
+         This option enables the workaround for the 743622 Cortex-A9
+         (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+         optimisation in the Cortex-A9 Store Buffer may lead to data
+         corruption. This workaround sets a specific bit in the diagnostic
+         register of the Cortex-A9 which disables the Store Buffer
+         optimisation, preventing the defect from occurring. This has no
+         visible impact on the overall performance or power consumption of the
+         processor.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1576,97 +1616,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_TEGRA ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377 ||\
-               ARCH_S5PV310
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..65a7c1c588a94ab4623be0ddfe02a691fb2954c6 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
@@ -112,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
        $(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
        @sed "$(SEDFLAGS)" < $< > $@
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..1bec96e851967101df7a796745b84d24bd320ab8 100644 (file)
@@ -263,6 +263,22 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= PHYS_OFFSET + SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
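
With the 64MB bounce window above, it8152 now also arbitrates coherent masks: a mask that covers the window is accepted, anything smaller is refused. Roughly what a driver on this bus would see (a sketch, not from the patch):

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
        /* 0xffffffff covers PHYS_OFFSET + SZ_64M - 1, so this succeeds;
         * a mask below the 64MB boundary gets -EIO from the hook above */
        if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;
        return 0;
}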
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..c4aa4e8c6af9cda0b7e88a46cbed94dc6e3b565d 100644 (file)
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
index ab68cf1ef80fe7ccad28bfdd5d5084bb6f1b61dc..e90b167ea8484002fffb0121e7a2d61726851fe6 100644 (file)
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
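
The new numbers can be exercised from userspace before libc grows wrappers, via the raw syscall interface. A hedged sketch (assumes EABI, where __NR_SYSCALL_BASE is 0, so the raw number equals the offset above):

#define _GNU_SOURCE
#include <unistd.h>

static int open_fanotify(void)
{
        /* fanotify_init(unsigned flags, unsigned event_f_flags) */
        return syscall(367, 0, 0);      /* __NR_fanotify_init on EABI */
}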
index f05a35a59694dc7af54a76ffb899e02f8b849875..7885722bdf4eff7115137ac46444c8d176f05b57 100644 (file)
@@ -48,6 +48,8 @@ work_pending:
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
+       tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
+       movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again
 
@@ -418,11 +420,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
                add     r0, sp, #S_OFF
+               mov     why, #0         @ prevent syscall restart handling
                b       sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
                add     r0, sp, #S_OFF
+               mov     why, #0         @ prevent syscall restart handling
                b       sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
index 8bccbfa693ffc359dc55d6004837d2a149e2c5cd..2c1f0050c9c4d9fd74ac08b1c0a9c193e16df4de 100644 (file)
@@ -1162,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
        /*
         * MSR   : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-        * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+        * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
         * ALU op with S bit and Rd == 15 :
         *         cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
         */
-       if ((insn & 0x0f900000) == 0x03200000 ||        /* MSR & Undef */
+       if ((insn & 0x0fb00000) == 0x03200000 ||        /* MSR */
+           (insn & 0x0ff00000) == 0x03400000 ||        /* Undef */
            (insn & 0x0e10f000) == 0x0210f000)          /* ALU s-bit, R15  */
                return INSN_REJECTED;
 
@@ -1177,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * *S (bit 20) updates condition codes
         * ADC/SBC/RSC reads the C flag
         */
-       insn &= 0xfff00fff;     /* Rn = r0, Rd = r0 */
+       insn &= 0xffff0fff;     /* Rd = r0 */
        asi->insn[0] = insn;
        asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
                        emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
index 417c392ddf1cb55066fa5f99e83e77514bd89901..49643b1467e62d529d4edd661990bb64da1e1f73 100644 (file)
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+       return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED              0xFFFF
 
 #define C(_x) \
@@ -221,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       WARN_ON(idx < 0);
-
-       clear_bit(idx, cpuc->active_mask);
-       armpmu->disable(hwc, idx);
-
-       barrier();
 
-       armpmu_event_update(event, hwc, idx);
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       /* Don't read disabled counters! */
+       if (hwc->idx < 0)
+               return;
 
-       perf_event_update_userpage(event);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
+       if (!armpmu)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx);
+       /*
+        * ARM pmu always has to update the counter, so ignore
+        * PERF_EF_UPDATE, see comments in armpmu_start().
+        */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               armpmu->disable(hwc, hwc->idx);
+               barrier();      /* ensure the disable completes before the final count update */
+               armpmu_event_update(event, hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
        struct hw_perf_event *hwc = &event->hw;
 
+       if (!armpmu)
+               return;
+
+       /*
+        * ARM pmu always has to reprogram the period, so ignore
+        * PERF_EF_RELOAD, see the comment below.
+        */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
-        * were throttled we simply disabled the IRQ source and the counter
+        * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
@@ -269,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
        armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       WARN_ON(idx < 0);
+
+       clear_bit(idx, cpuc->active_mask);
+       armpmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+
+       perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
 
+       perf_pmu_disable(event->pmu);
+
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
@@ -293,25 +328,19 @@ armpmu_enable(struct perf_event *event)
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
 
-       /* Set the period for the event. */
-       armpmu_event_set_period(event, hwc, idx);
-
-       /* Enable the event. */
-       armpmu->enable(hwc, idx);
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               armpmu_start(event, PERF_EF_RELOAD);
 
        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
 
 out:
+       perf_pmu_enable(event->pmu);
        return err;
 }
 
-static struct pmu pmu = {
-       .enable     = armpmu_enable,
-       .disable    = armpmu_disable,
-       .unthrottle = armpmu_unthrottle,
-       .read       = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -319,8 +348,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -491,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
        return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
        int err = 0;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!armpmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        event->destroy = hw_perf_event_destroy;
 
        if (!atomic_inc_not_zero(&active_events)) {
-               if (atomic_read(&active_events) > perf_max_events) {
+               if (atomic_read(&active_events) > armpmu->num_events) {
                        atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
+                       return -ENOSPC;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -518,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
        }
 
        if (err)
-               return ERR_PTR(err);
+               return err;
 
        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
 
-       return err ? ERR_PTR(err) : &pmu;
+       return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
        /* Enable all of the perf events on hardware. */
        int idx;
@@ -549,13 +586,23 @@ hw_perf_enable(void)
        armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
        if (armpmu)
                armpmu->stop();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = armpmu_enable,
+       .pmu_disable    = armpmu_disable,
+       .event_init     = armpmu_event_init,
+       .add            = armpmu_add,
+       .del            = armpmu_del,
+       .start          = armpmu_start,
+       .stop           = armpmu_stop,
+       .read           = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -1041,11 +1088,11 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
-       perf_event_do_pending();
+       irq_work_run();
 
        return IRQ_HANDLED;
 }
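
Taken together, these hunks port the driver to the reworked pmu interface: enable/disable/unthrottle become add/del/start/stop with explicit PERF_HES_* bookkeeping, event_init replaces hw_perf_event_init and returns -ENOENT for event types it does not own, overflow handling defers through irq_work, and the pmu is registered once with perf_pmu_register(). A minimal no-op pmu in the same shape (a sketch against that API, not code from the patch):

#include <linux/perf_event.h>

static int nop_event_init(struct perf_event *event)
{
        if (event->attr.type != PERF_TYPE_SOFTWARE)
                return -ENOENT;         /* let another pmu claim it */
        return 0;
}

static int  nop_add(struct perf_event *event, int flags)   { return 0; }
static void nop_del(struct perf_event *event, int flags)   { }
static void nop_start(struct perf_event *event, int flags) { }
static void nop_stop(struct perf_event *event, int flags)  { }
static void nop_read(struct perf_event *event)             { }

static struct pmu nop_pmu = {
        .event_init     = nop_event_init,
        .add            = nop_add,
        .del            = nop_del,
        .start          = nop_start,
        .stop           = nop_stop,
        .read           = nop_read,
};

/* registered once at init, as this file now does: perf_pmu_register(&nop_pmu); */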
@@ -2017,11 +2064,11 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
-       perf_event_do_pending();
+       irq_work_run();
 
        return IRQ_HANDLED;
 }
@@ -2389,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                        armpmu->disable(hwc, idx);
        }
 
-       perf_event_do_pending();
+       irq_work_run();
 
        /*
         * Re-enable the PMU.
@@ -2716,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                        armpmu->disable(hwc, idx);
        }
 
-       perf_event_do_pending();
+       irq_work_run();
 
        /*
         * Re-enable the PMU.
@@ -2933,14 +2980,12 @@ init_hw_perf_events(void)
                        armpmu = &armv6pmu;
                        memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                                        sizeof(armv6_perf_cache_map));
-                       perf_max_events = armv6pmu.num_events;
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = &armv6mpcore_pmu;
                        memcpy(armpmu_perf_cache_map,
                               armv6mpcore_perf_cache_map,
                               sizeof(armv6mpcore_perf_cache_map));
-                       perf_max_events = armv6mpcore_pmu.num_events;
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2952,7 +2997,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2964,7 +3008,6 @@ init_hw_perf_events(void)
                        /* Reset PMNC and read the nb of CNTx counters
                            supported */
                        armv7pmu.num_events = armv7_reset_read_pmnc();
-                       perf_max_events = armv7pmu.num_events;
                        break;
                }
        /* Intel CPUs [xscale]. */
@@ -2975,13 +3018,11 @@ init_hw_perf_events(void)
                        armpmu = &xscale1pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale1pmu.num_events;
                        break;
                case 2:
                        armpmu = &xscale2pmu;
                        memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                        sizeof(xscale_perf_cache_map));
-                       perf_max_events = xscale2pmu.num_events;
                        break;
                }
        }
@@ -2991,9 +3032,10 @@ init_hw_perf_events(void)
                                arm_pmu_names[armpmu->id], armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
-               perf_max_events = -1;
        }
 
+       perf_pmu_register(&pmu);
+
        return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3001,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3039,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
 
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
 
        /*
         * Frame pointers should strictly progress back up the stack
@@ -3051,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
        return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct frame_tail *tail;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
 
        tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3078,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
                void *data)
 {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
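
The callchain rewrite shrinks the arch's job to storing program counters: the per-cpu entry buffers, the PERF_CONTEXT_KERNEL/USER markers, and the regs selection that used to live here are now handled by generic perf code. The walker callback reduces to a sketch like this (mirroring callchain_trace above):

#include <linux/perf_event.h>
#include <asm/stacktrace.h>

static int example_trace(struct stackframe *fr, void *data)
{
        struct perf_callchain_entry *entry = data;

        perf_callchain_store(entry, fr->pc);    /* bounds-checked append */
        return 0;                               /* non-zero stops the walk */
}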
index 753c0d31a3d3f0b407cfe20b68f5ba66189cd1a7..c67b47f1c0fd805751cf226a65315337a2507942 100644 (file)
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
        .pmc_mask       = 1 << AT91SAM9G45_ID_SSC1,
        .type           = CLK_TYPE_PERIPHERAL,
 };
-static struct clk tcb_clk = {
-       .name           = "tcb_clk",
+static struct clk tcb0_clk = {
+       .name           = "tcb0_clk",
        .pmc_mask       = 1 << AT91SAM9G45_ID_TCB,
        .type           = CLK_TYPE_PERIPHERAL,
 };
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
        .parent         = &uhphs_clk,
 };
 
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+       .name           = "tcb1_clk",
+       .pmc_mask       = 0,
+       .type           = CLK_TYPE_PERIPHERAL,
+       .parent         = &tcb0_clk,
+};
+
 static struct clk *periph_clocks[] __initdata = {
        &pioA_clk,
        &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
        &spi1_clk,
        &ssc0_clk,
        &ssc1_clk,
-       &tcb_clk,
+       &tcb0_clk,
        &pwm_clk,
        &tsc_clk,
        &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
        &mmc1_clk,
        // irq0
        &ohci_clk,
+       &tcb1_clk,
 };
 
 /*
index 809114d5a5a6690ec3c4f510bb6cb88fc7fede99..1276babf84d540ad253d06eb089f29d57beab1a7 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [2] = {
+       [1] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PA21,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PB11,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
 static void __init at91_add_device_tc(void)
 {
        /* this chip has one clock and irq for all six TC channels */
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+       at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb0_device);
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+       at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb1_device);
 }
 #else
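
The udelay bump above is a correctness fix: i2c-gpio busy-waits one half-period per SCL edge, so the bus rate is roughly 1 / (2 * udelay). With udelay = 5 us that is ~100 kHz, matching the comment; the old value of 2 us actually produced ~250 kHz. A sketch (pin fields omitted):

#include <linux/i2c-gpio.h>

static struct i2c_gpio_platform_data pdata_example = {
        .udelay = 5,    /* half-period: 1 / (2 * 5 us) = 100 kHz */
};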
index c4c8865d52d7bfd84f5a22416ef7fe6bc1c472aa..65eb0943194f4b0ea4ea65613435ceef4d875c75 100644 (file)
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
                .start  = AT91_PIN_PC11,
                .end    = AT91_PIN_PC11,
                .flags  = IORESOURCE_IRQ
+                       | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
        }
 };
 
 static struct dm9000_plat_data dm9000_platdata = {
-       .flags          = DM9000_PLATF_16BITONLY,
+       .flags          = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
 };
 
 static struct platform_device dm9000_device = {
@@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
 };
 
 
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
-       .wire4          = 1,
-//     .det_pin        = ... not connected
-//     .wp_pin         = ... not connected
-//     .vcc_pin        = ... not connected
-};
-
-
 /*
  * NAND flash
  */
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
        at91_add_device_nand(&ek_nand_data);
 }
 
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
 
 /*
  * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
 #endif
 };
 
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .wire4          = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
 
 /*
  * LCD Controller
index 7f7da439341fabc4e85b6febeb8b1f3e2cd6f4ee..7525cee3983f7252fac0542be5955a490ca6869b 100644 (file)
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
 int __init clk_register(struct clk *clk)
 {
        if (clk_is_peripheral(clk)) {
-               clk->parent = &mck;
+               if (!clk->parent)
+                       clk->parent = &mck;
                clk->mode = pmc_periph_mode;
                list_add_tail(&clk->node, &clocks);
        }
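
This guard is what makes the "fake" tcb1_clk above work: a peripheral clock may now arrive at clk_register() with a parent already chosen (here tcb0_clk, whose PMC bit does the real gating), and only parentless clocks fall back to mck. A sketch of the idiom (real_parent_clk is a placeholder name):

static struct clk fake_child_clk = {
        .name           = "fake_child_clk",
        .pmc_mask       = 0,                    /* no PMC enable bit of its own */
        .type           = CLK_TYPE_PERIPHERAL,
        .parent         = &real_parent_clk,     /* preserved by clk_register() */
};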
index c80e090b36708706671d8c66ff1a67a1c0106d61..ee8db152592e087c8fe986dd1c7d7e557d473ad4 100644 (file)
 
 static inline void arch_idle(void)
 {
-#ifndef CONFIG_DEBUG_KERNEL
        /*
         * Disable the processor clock.  The processor will be automatically
         * re-enabled by an interrupt or by a reset.
         */
        at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-#else
+#ifndef CONFIG_CPU_ARM920T
        /*
         * Set the processor (CP15) into 'Wait for Interrupt' mode.
-        * Unlike disabling the processor clock via the PMC (above)
-        *  this allows the processor to be woken via JTAG.
+        * Post-RM9200 processors need this in conjunction with the above
+        * to save power when idle.
         */
        cpu_do_idle();
 #endif
index 29c0a911df262f6d70b5c369afa4788c44f0c07a..77eb35c89cd02161ee1678b049b6d80490423c09 100644 (file)
@@ -691,7 +691,7 @@ int dma_init(void)
 
        memset(&gDMA, 0, sizeof(gDMA));
 
-       init_MUTEX_LOCKED(&gDMA.lock);
+       sema_init(&gDMA.lock, 0);
        init_waitqueue_head(&gDMA.freeChannelQ);
 
        /* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
 {
        memset(memMap, 0, sizeof(*memMap));
 
-       init_MUTEX(&memMap->lock);
+       sema_init(&memMap->lock, 1);
 
        return 0;
 }
index 3d996b659ff41e43d794d93c167b409afb90c355..9be261beae7ddb2a72e2e9a76d9dbf2e2bf51316 100644 (file)
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 6b6f4c643709c7d14a4b2baf168464ad772be4ce..7781e35daec3d0a7cf9e956aceb7e717f48c7c44 100644 (file)
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 40fec315c99a192826d7530d43c6406726ecbcff..5e5b0a7831fbf2b7af156ba76bf4af0b6be26489 100644 (file)
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00008000),
                .length         = SZ_16K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index e4a3df1872aca89404899034ebbe920d453e7f66..26e8a9c7f50b4393f1e716575950fff967658b9a 100644 (file)
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 3b3e4721ce2ea0fb9236803e566d4c67b0d5638e..eb4936ff90ad9c42b283fc6c354eae1f01c6c4c7 100644 (file)
@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT         0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-                                  DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)           (a)
+#define __io(a)        ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+                                                DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)   (a)
 
 #endif
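
The fix swaps the physical base for the bus base in the I/O translation: a PCI bus I/O address is mapped into the CPU's window by subtracting where the range starts on the bus and adding where the window lives virtually. The same math as a function (a sketch restating the macro above):

static inline void __iomem *pcie0_io_to_virt(unsigned long bus_addr)
{
        return (void __iomem *)(bus_addr - DOVE_PCIE0_IO_BUS_BASE
                                + DOVE_PCIE0_IO_VIRT_BASE);
}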
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
index 8904ca4e2e24fc9a4984bd0b88ce9d3eab52e7ea..a696d354b1f82598649586e6478be6334a91f978 100644 (file)
@@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch)
        v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
        m2p_set_control(ch, v);
 
-       while (m2p_channel_state(ch) == STATE_ON)
+       while (m2p_channel_state(ch) >= STATE_ON)
                cpu_relax();
 
        m2p_set_control(ch, 0x0);
index c5c0369bb481dff32f4bbb8c214f4859a16787ac..2f7e2728970d66966a959542f106fb9a216860ea 100644 (file)
@@ -122,6 +122,7 @@ config MACH_CPUIMX27
        select IMX_HAVE_PLATFORM_IMX_I2C
        select IMX_HAVE_PLATFORM_IMX_UART
        select IMX_HAVE_PLATFORM_MXC_NAND
+       select MXC_ULPI if USB_ULPI
        help
          Include support for Eukrea CPUIMX27 platform. This includes
          specific configurations for the module and its peripherals.
index 339150ab0ea5d63e13f0520e06612fc827f37f49..6830afd1d2baf01bc60aa959d15841aed48e26a6 100644 (file)
@@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void)
        i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
                                ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
 
-       imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
+       imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
 
        platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
 
index 61cd4d64b98596c7507dcaf67d069dbe22508e50..24498a932ba65b8054de5ba8229ef0e0c838dd71 100644 (file)
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
        return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 
index f91ca6d4fbe8a2820758d2252091a761caac377c..8138371c406e6584b8990803079c7a093cbe1964 100644 (file)
@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM                0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses()    1
 
 /* Register locations and bits */
index 93fc2ec95e7687b30b6144f2116adb0664266a4d..6e924b398919e822ca4f2bddd294358bf4008ede 100644 (file)
@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE    0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE    0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE         SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE     0xf2000000
index 55e7f00836b7cdba68a9662729119e977904a2dd..513ad3102d7c192d2fdc00229b6cdb527b002d87 100644 (file)
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 0 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 1 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
index 4f5b0e0ce6cf8f87e0e843b82ce2f8b0b4cbc34c..1a8a25edb1b422ace6925e07171953390b51bc11 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
        cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       if (cpu_is_pxa168())
+               cpu_reset(0xffff0000);
+       else
+               cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
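
A worked example for the new HSP path (assuming the common 532 MHz MPLL setting, which the patch does not state): fref > 400 MHz with hsp_podf == 0 selects fref >> 2, so the IPU clock comes out at 133 MHz instead of the AHB-derived rate it was wrongly given before.

static unsigned long example_hsp_hz(void)
{
        return 532000000UL >> 2;        /* 133 MHz */
}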
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 9770a6a973be561fdfb67cfdda1f8cdf36381e8e..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
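
The one-line fix matters because each CCGR gate is a two-bit field: the AND-NOT needs the field mask (MXC_CCM_CCGRx_CG_MASK, both bits) rather than the "module off" field value. The intended read-modify-write, assuming the two-bit layout:

static void ccgr_gate_off(void __iomem *reg_addr, int shift)
{
        u32 reg = __raw_readl(reg_addr);

        reg &= ~(0x3 << shift);         /* clear the full 2-bit gate field */
        __raw_writel(reg, reg_addr);
}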
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..58093d9e07be44ea482df319c88acb78c0db3975 100644 (file)
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        freqs.cpu = policy->cpu;
 
        if (freq_debug)
-               pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-                        "(SDRAM %d Mhz)\n",
+               pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
                         freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
                         (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
@@ -398,7 +397,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7f64d24cd5648df0790c6ec570322195d92359e5..814f1458a06a52ec5af6b37643ca4b6db602dd55 100644 (file)
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id <= 0x2;                             \
         })
+#else
+#define __cpu_is_pxa2xx(id)    (0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id == 0x3;                             \
         })
+#else
+#define __cpu_is_pxa3xx(id)    (0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 4 & 0xfff;   \
                _id == 0x683 || _id == 0x693;           \
         })
+#else
+#define __cpu_is_pxa93x(id)    (0)
+#endif
 
 #define cpu_is_pxa2xx()                                        \
        ({                                              \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO         0
 #define PCIBIOS_MIN_MEM                0
 #define pcibios_assign_all_busses()    1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
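
Wrapping the ID checks in config guards turns the cpu_is_*() helpers into compile-time constants on kernels built for a single family, letting the compiler delete whole branches. A sketch of the payoff (init_pxa3xx_quirks is a hypothetical helper):

static void __init board_init_cpu(void)
{
        if (cpu_is_pxa3xx())            /* constant 0 unless CONFIG_PXA3xx */
                init_pxa3xx_quirks();   /* dead code, dropped entirely */
}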
index 262691fb97d86c2ce36de96d7e180a35b4906298..fdca3be47d9bb1fdaa70d8b7b9c112c34b32c495 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 77ad6d34ab5bc9cd0eb5ce2917a461ce8904c55c..405b92a29793d80fa771546418d78b4bec7094cd 100644 (file)
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
        },
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+       .use_pio        = 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
        i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-       pxa27x_set_i2c_power_info(NULL);
+       pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif
index c9b747cedea8fb48a4a9615d52394fa5d2e77fcc..37d6173bbb660868a3cb696cedcaab97b9eee66d 100644 (file)
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
        .ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
+       .gpio_power             = -1,
        .gpio_card_detect       = GPIO53_VPAC270_SD_DETECT_N,
        .gpio_card_ro           = GPIO52_VPAC270_SD_READONLY,
        .detect_delay_ms        = 200,
index a492b982aa062705a0c24d03dd696b020719b262..405e621289172512bbf01527d9061717a95d55d6 100644 (file)
 #include <mach/map.h>
 #include <mach/gpio-bank-c.h>
 #include <mach/spi-clocks.h>
+#include <mach/irqs.h>
 
 #include <plat/s3c64xx-spi.h>
 #include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
+#include <plat/devs.h>
 
 static char *spi_src_clks[] = {
        [S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
index 5c07d013b23da47dc387a436a2cedbeea2c826b7..e130379ba0e8df2752dfd5e4cae25e25b5dc6630 100644 (file)
 #include <plat/devs.h>
 #include <plat/regs-serial.h>
 
-#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
-#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
-#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
 static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
        [0] = {
-               .hwport      = 0,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 0,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [1] = {
-               .hwport      = 1,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 1,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [2] = {
-               .hwport      = 2,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 2,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [3] = {
-               .hwport      = 3,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 3,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
 };
 
 /* DM9000AEP 10/100 ethernet controller */
 
 static struct resource real6410_dm9k_resource[] = {
-        [0] = {
-                .start = S3C64XX_PA_XM0CSN1,
-                .end   = S3C64XX_PA_XM0CSN1 + 1,
-                .flags = IORESOURCE_MEM
-        },
-        [1] = {
-                .start = S3C64XX_PA_XM0CSN1 + 4,
-                .end   = S3C64XX_PA_XM0CSN1 + 5,
-                .flags = IORESOURCE_MEM
-        },
-        [2] = {
-                .start = S3C_EINT(7),
-                .end   = S3C_EINT(7),
-                .flags = IORESOURCE_IRQ,
-        }
+       [0] = {
+               .start  = S3C64XX_PA_XM0CSN1,
+               .end    = S3C64XX_PA_XM0CSN1 + 1,
+               .flags  = IORESOURCE_MEM
+       },
+       [1] = {
+               .start  = S3C64XX_PA_XM0CSN1 + 4,
+               .end    = S3C64XX_PA_XM0CSN1 + 5,
+               .flags  = IORESOURCE_MEM
+       },
+       [2] = {
+               .start  = S3C_EINT(7),
+               .end    = S3C_EINT(7),
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+       }
 };
 
 static struct dm9000_plat_data real6410_dm9k_pdata = {
-        .flags          = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+       .flags          = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
 };
 
 static struct platform_device real6410_device_eth = {
-        .name           = "dm9000",
-        .id             = -1,
-        .num_resources  = ARRAY_SIZE(real6410_dm9k_resource),
-        .resource       = real6410_dm9k_resource,
-        .dev            = {
-                .platform_data  = &real6410_dm9k_pdata,
-        },
+       .name           = "dm9000",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(real6410_dm9k_resource),
+       .resource       = real6410_dm9k_resource,
+       .dev            = {
+               .platform_data  = &real6410_dm9k_pdata,
+       },
 };
 
 static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
        /* set timing for nCS1 suitable for ethernet chip */
 
        __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
-                       (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
-                       (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
-                       (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
-                       (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
-                       (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
-                       (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+               (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+               (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+               (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+               (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+               (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+               (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
 
        platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
 }
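
The UCON/ULCON/UFCON change at the top of this file is more than style: an
unparenthesised macro body can be split by operator precedence at the use
site. A minimal illustration, reusing the names from the change above:

	/* & binds tighter than |, so masking an unparenthesised macro goes wrong: */
	#define UCON_BAD   S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
	#define UCON_GOOD  (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)

	u32 a = UCON_BAD & 0xff;   /* expands to DEFAULT | (UCLK & 0xff) */
	u32 b = UCON_GOOD & 0xff;  /* expands to (DEFAULT | UCLK) & 0xff */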
index 526f33adb31d65e56d0a5064f3ac7b16496eef33..ec592e8660547a45956bcddc4bb6426523973b30 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
index a48fb553fd01cf5d42e1ab590146d7cad1e7680f..70ac681af72bc7a1a9ce5434c164e5b86e1fdf55 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
index 251c92ac5b227e05a28251ed7b890ff1f84eccfb..cd1afbce83e2cfa6d006d6084e384e1705408be8 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sysdev.h>
 #include <linux/serial_core.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
index af91fefef2c6c77899c4f96ffc9bed03cacee633..d562670e1b0b44005ade9644f69338de44b459bb 100644 (file)
@@ -173,11 +173,6 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
        return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
 }
 
-static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
-{
-       return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
-}
-
 static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
 {
        return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
@@ -280,6 +275,24 @@ static struct clk init_clocks_disable[] = {
                .parent         = &clk_hclk_dsys.clk,
                .enable         = s5pv210_clk_ip0_ctrl,
                .ctrlbit        = (1<<29),
+       }, {
+               .name           = "fimc",
+               .id             = 0,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 24),
+       }, {
+               .name           = "fimc",
+               .id             = 1,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 25),
+       }, {
+               .name           = "fimc",
+               .id             = 2,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 26),
        }, {
                .name           = "otg",
                .id             = -1,
@@ -357,7 +370,7 @@ static struct clk init_clocks_disable[] = {
                .id             = 1,
                .parent         = &clk_pclk_psys.clk,
                .enable         = s5pv210_clk_ip3_ctrl,
-               .ctrlbit        = (1<<8),
+               .ctrlbit        = (1 << 10),
        }, {
                .name           = "i2c",
                .id             = 2,
index b9f4d677cf5541460747a5086fb25f5fbcea744e..245b82b53df4612d63d45984659e2221605d124f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/sysdev.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -47,7 +48,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSTIMER,
                .pfn            = __phys_to_pfn(S5PV210_PA_SYSTIMER),
-               .length         = SZ_1M,
+               .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)VA_VIC2,
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
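
For context on how a driver ends up using this new file: the bus notifier
attaches the pm_runtime_data devres at BUS_NOTIFY_BIND_DRIVER, after which a
driver only speaks the generic runtime PM API and the clock handling above
happens behind it. A minimal consumer sketch (driver name and the register
programming step are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int demo_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);

		pm_runtime_get_sync(&pdev->dev);  /* resume path: clk_enable() above */
		/* ... program the device registers here ... */
		pm_runtime_put_sync(&pdev->dev);  /* idle -> suspend: clk_disable() */

		return 0;
	}

	static int demo_remove(struct platform_device *pdev)
	{
		pm_runtime_disable(&pdev->dev);
		return 0;
	}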
index 7b1fc984abb64c85dd3124eba6b6b108000835b9..d5a71abcbaeaf5a8da0255e50e89f5982fd447d4 100644 (file)
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {
index 577df6cccb0891503bf0188f3c0f33103c159000..71fb173495209572817a0e15f8794ab8c756b70f 100644 (file)
@@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void)
 }
 
 #if 0
-static void ct_ca9x4_timer_init(void)
+static void __init ct_ca9x4_timer_init(void)
 {
        writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
        writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
@@ -222,12 +222,18 @@ static struct platform_device pmu_device = {
        .resource       = pmu_resources,
 };
 
-static void ct_ca9x4_init(void)
+static void __init ct_ca9x4_init(void)
 {
        int i;
 
 #ifdef CONFIG_CACHE_L2X0
-       l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+       void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+       /* set RAM latencies to 1 cycle for this core tile. */
+       writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+       writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+       l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
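
For readers of the hunk above, a sketch of what l2x0_init() does with these
two arguments (per arch/arm/mm/cache-l2x0.c at this time; the meaning of
bit 22 is left to the PL310 TRM):

	/* Effective auxiliary-control computation inside l2x0_init(): */
	u32 aux = readl(l2x0_base + L2X0_AUX_CTRL);
	aux &= 0xfe0fffff;   /* aux_mask: keep most of the probed bits */
	aux |= 0x00400000;   /* aux_val: force bit 22 on */
	writel(aux, l2x0_base + L2X0_AUX_CTRL);  /* applied only if L2 not yet enabled */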
index 817f0ad38a0b5100ec0884e8c908c84af471bec4..7eaa232180a5ae627c3639ba642755444a5d3e27 100644 (file)
@@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
 }
 
 
-static void v2m_timer_init(void)
+static void __init v2m_timer_init(void)
 {
        writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
        writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index d073b64ae87ec4f6652c67959244292dbb3e69ad..724ba3bce72c952ff44645d2a50e5566b568c943 100644 (file)
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (ai_usermode & UM_SIGNAL)
                force_sig(SIGBUS, current);
-       else
-               set_cr(cr_no_alignment);
+       else {
+               /*
+                * We're about to disable the alignment trap and return to
+                * user space.  But if an interrupt occurs before actually
+                * reaching user space, then the IRQ vector entry code will
+                * notice that we were still in kernel space and therefore
+                * the alignment trap won't be re-enabled in that case as it
+                * is presumed to be always on from kernel space.
+                * Let's prevent that race by disabling interrupts here (they
+                * are disabled on the way back to user space anyway in
+                * entry-common.S) and by disabling the alignment trap only if
+                * there is no work pending for this thread.
+                */
+               raw_local_irq_disable();
+               if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+                       set_cr(cr_no_alignment);
+       }
 
        return 0;
 }
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index ab506272b2d3ef459b264b7741d61af46f6aa6b8..17e7b0b57e49f80e6e30c8bbe65ea9d1ebcf14a6 100644 (file)
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        /*
         * Don't allow RAM to be mapped - this causes problems with ARMv6+
         */
-       if (WARN_ON(pfn_valid(pfn)))
-               return NULL;
+       if (pfn_valid(pfn)) {
+               printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
+                      KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+                      KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
+               WARN_ON(1);
+       }
 
        type = get_mem_type(mtype);
        if (!type)
index 6e1c4f6a2b3f3a09ed3f10be9aeafab36c9a0924..e8ed9dc461fe39631efeeb4746b80854e13415c6 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_WRITE | L_PTE_EXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
-       if (arch_is_coherent() && cpu_is_xsc3())
+       if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+       }
        /*
         * ARMv6 and above have extended page tables.
         */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
                mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
        }
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
        }
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (!pfn_valid(pfn))
+               return pgprot_noncached(vma_prot);
+       else if (file->f_flags & O_SYNC)
+               return pgprot_writecombine(vma_prot);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
index 6a8506d99ee9abbb0845f39d94d663f802c56885..197f21bed5e919f3a13f3cbc9404d428ad80fb97 100644 (file)
@@ -186,13 +186,14 @@ cpu_v7_name:
  *     It is assumed that:
  *     - cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
        mrc     p15, 0, r0, c1, c0, 1
        tst     r0, #(1 << 6)                   @ SMP/nAMP mode enabled?
        orreq   r0, r0, #(1 << 6) | (1 << 0)    @ Enable SMP/nAMP mode and
        mcreq   p15, 0, r0, c1, c0, 1           @ TLB ops broadcasting
 #endif
+__v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r10, r0, #0xff000000            @ ARM?
        teq     r10, #0x41000000
-       bne     2f
+       bne     3f
        and     r5, r0, #0x00f00000             @ variant
        and     r6, r0, #0x0000000f             @ revision
-       orr     r0, r6, r5, lsr #20-4           @ combine variant and revision
+       orr     r6, r6, r5, lsr #20-4           @ combine variant and revision
+       ubfx    r0, r0, #4, #12                 @ primary part number
 
+       /* Cortex-A8 Errata */
+       ldr     r10, =0x00000c08                @ Cortex-A8 primary part number
+       teq     r0, r10
+       bne     2f
 #ifdef CONFIG_ARM_ERRATA_430973
        teq     r5, #0x00100000                 @ only present in r1p*
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
@@ -213,21 +219,50 @@ __v7_setup:
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
        orreq   r10, r10, #(1 << 5)             @ set L1NEON to 1
        orreq   r10, r10, #(1 << 9)             @ set PLDNOP to 1
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 1, r10, c9, c0, 2          @ read L2 cache aux ctrl register
        tsteq   r10, #1 << 22
        orreq   r10, r10, #(1 << 22)            @ set the Write Allocate disable bit
        mcreq   p15, 1, r10, c9, c0, 2          @ write the L2 cache aux ctrl register
 #endif
+       b       3f
 
-2:     mov     r10, #0
+       /* Cortex-A9 Errata */
+2:     ldr     r10, =0x00000c09                @ Cortex-A9 primary part number
+       teq     r0, r10
+       bne     3f
+#ifdef CONFIG_ARM_ERRATA_742230
+       cmp     r6, #0x22                       @ only present up to r2p2
+       mrcle   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orrle   r10, r10, #1 << 4               @ set bit #4
+       mcrle   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+       teq     r6, #0x20                       @ present in r2p0
+       teqne   r6, #0x21                       @ present in r2p1
+       teqne   r6, #0x22                       @ present in r2p2
+       mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orreq   r10, r10, #1 << 12              @ set bit #12
+       orreq   r10, r10, #1 << 22              @ set bit #22
+       mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_743622
+       teq     r6, #0x20                       @ present in r2p0
+       teqne   r6, #0x21                       @ present in r2p1
+       teqne   r6, #0x22                       @ present in r2p2
+       mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orreq   r10, r10, #1 << 6               @ set bit #6
+       mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+
+3:     mov     r10, #0
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
 #endif
@@ -323,6 +358,29 @@ cpu_elf_name:
 
        .section ".proc.info.init", #alloc, #execinstr
 
+       .type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+       .long   0x410fc090              @ Required ID value
+       .long   0xff0ffff0              @ Mask for ID
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ | \
+               PMD_FLAGS
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_XN | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       b       __v7_ca9mp_setup
+       .long   cpu_arch_name
+       .long   cpu_elf_name
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
+       .long   cpu_v7_name
+       .long   v7_processor_functions
+       .long   v7wbi_tlb_fns
+       .long   v6_user_fns
+       .long   v7_cache_fns
+       .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
index e666eafed15295aa5feeedf60d374a704b2caa15..b2215c61cdf02a18df71849611007d97cfea4470 100644 (file)
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
                oprofilefs.o oprofile_stats.o \
                timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y                             := $(DRIVER_OBJS) common.o
index 0691176899ffc24f0a176d154a34f5b7200c6047..8aa974491dfcd555a6962461c0d2aa5f3287b653 100644 (file)
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
-       unsigned long count;
-       unsigned long enabled;
-       unsigned long event;
-       unsigned long unit_mask;
-       unsigned long kernel;
-       unsigned long user;
-       struct perf_event_attr attr;
-};
-
-static int op_arm_enabled;
-static DEFINE_MUTEX(op_arm_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
-                       struct perf_sample_data *data, struct pt_regs *regs)
-{
-       int id;
-       u32 cpu = smp_processor_id();
-
-       for (id = 0; id < perf_num_counters; ++id)
-               if (perf_events[cpu][id] == event)
-                       break;
-
-       if (id != perf_num_counters)
-               oprofile_add_sample(regs, id);
-       else
-               pr_warning("oprofile: ignoring spurious overflow "
-                               "on cpu %u\n", cpu);
-}
-
-/*
- * Called by op_arm_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
-       int i;
-       u32 size = sizeof(struct perf_event_attr);
-       struct perf_event_attr *attr;
-
-       for (i = 0; i < perf_num_counters; ++i) {
-               attr = &counter_config[i].attr;
-               memset(attr, 0, size);
-               attr->type              = PERF_TYPE_RAW;
-               attr->size              = size;
-               attr->config            = counter_config[i].event;
-               attr->sample_period     = counter_config[i].count;
-               attr->pinned            = 1;
-       }
-}
-
-static int op_create_counter(int cpu, int event)
-{
-       int ret = 0;
-       struct perf_event *pevent;
-
-       if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
-               return ret;
-
-       pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-                                                 cpu, -1,
-                                                 op_overflow_handler);
-
-       if (IS_ERR(pevent)) {
-               ret = PTR_ERR(pevent);
-       } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
-               pr_warning("oprofile: failed to enable event %d "
-                               "on CPU %d\n", event, cpu);
-               ret = -EBUSY;
-       } else {
-               perf_events[cpu][event] = pevent;
-       }
-
-       return ret;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
-       struct perf_event *pevent = perf_events[cpu][event];
-
-       if (pevent) {
-               perf_event_release_kernel(pevent);
-               perf_events[cpu][event] = NULL;
-       }
-}
-
-/*
- * Called by op_arm_start to create active perf events based on the
- * previously configured attributes.
- */
-static int op_perf_start(void)
-{
-       int cpu, event, ret = 0;
-
-       for_each_online_cpu(cpu) {
-               for (event = 0; event < perf_num_counters; ++event) {
-                       ret = op_create_counter(cpu, event);
-                       if (ret)
-                               goto out;
-               }
-       }
-
-out:
-       return ret;
-}
-
-/*
- * Called by op_arm_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
+char *op_name_from_perf_id(void)
 {
-       int cpu, event;
+       enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
 
-       for_each_online_cpu(cpu)
-               for (event = 0; event < perf_num_counters; ++event)
-                       op_destroy_counter(cpu, event);
-}
-
-
-static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
-{
        switch (id) {
        case ARM_PERF_PMU_ID_XSCALE1:
                return "arm/xscale1";
@@ -175,116 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
        }
 }
 
-static int op_arm_create_files(struct super_block *sb, struct dentry *root)
-{
-       unsigned int i;
-
-       for (i = 0; i < perf_num_counters; i++) {
-               struct dentry *dir;
-               char buf[4];
-
-               snprintf(buf, sizeof buf, "%d", i);
-               dir = oprofilefs_mkdir(sb, root, buf);
-               oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-               oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-               oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-               oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-               oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-               oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-       }
-
-       return 0;
-}
-
-static int op_arm_setup(void)
-{
-       spin_lock(&oprofilefs_lock);
-       op_perf_setup();
-       spin_unlock(&oprofilefs_lock);
-       return 0;
-}
-
-static int op_arm_start(void)
-{
-       int ret = -EBUSY;
-
-       mutex_lock(&op_arm_mutex);
-       if (!op_arm_enabled) {
-               ret = 0;
-               op_perf_start();
-               op_arm_enabled = 1;
-       }
-       mutex_unlock(&op_arm_mutex);
-       return ret;
-}
-
-static void op_arm_stop(void)
-{
-       mutex_lock(&op_arm_mutex);
-       if (op_arm_enabled)
-               op_perf_stop();
-       op_arm_enabled = 0;
-       mutex_unlock(&op_arm_mutex);
-}
-
-#ifdef CONFIG_PM
-static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
-{
-       mutex_lock(&op_arm_mutex);
-       if (op_arm_enabled)
-               op_perf_stop();
-       mutex_unlock(&op_arm_mutex);
-       return 0;
-}
-
-static int op_arm_resume(struct platform_device *dev)
-{
-       mutex_lock(&op_arm_mutex);
-       if (op_arm_enabled && op_perf_start())
-               op_arm_enabled = 0;
-       mutex_unlock(&op_arm_mutex);
-       return 0;
-}
-
-static struct platform_driver oprofile_driver = {
-       .driver         = {
-               .name           = "arm-oprofile",
-       },
-       .resume         = op_arm_resume,
-       .suspend        = op_arm_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&oprofile_driver);
-       if (ret)
-               goto out;
-
-       oprofile_pdev = platform_device_register_simple(
-                               oprofile_driver.driver.name, 0, NULL, 0);
-       if (IS_ERR(oprofile_pdev)) {
-               ret = PTR_ERR(oprofile_pdev);
-               platform_driver_unregister(&oprofile_driver);
-       }
-
-out:
-       return ret;
-}
-
-static void  exit_driverfs(void)
-{
-       platform_device_unregister(oprofile_pdev);
-       platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int report_trace(struct stackframe *frame, void *d)
 {
        unsigned int *depth = d;
@@ -349,72 +111,14 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-       int cpu, ret = 0;
-
-       perf_num_counters = armpmu_get_max_events();
-
-       counter_config = kcalloc(perf_num_counters,
-                       sizeof(struct op_counter_config), GFP_KERNEL);
-
-       if (!counter_config) {
-               pr_info("oprofile: failed to allocate %d "
-                               "counters\n", perf_num_counters);
-               return -ENOMEM;
-       }
-
-       ret = init_driverfs();
-       if (ret) {
-               kfree(counter_config);
-               return ret;
-       }
-
-       for_each_possible_cpu(cpu) {
-               perf_events[cpu] = kcalloc(perf_num_counters,
-                               sizeof(struct perf_event *), GFP_KERNEL);
-               if (!perf_events[cpu]) {
-                       pr_info("oprofile: failed to allocate %d perf events "
-                                       "for cpu %d\n", perf_num_counters, cpu);
-                       while (--cpu >= 0)
-                               kfree(perf_events[cpu]);
-                       return -ENOMEM;
-               }
-       }
-
        ops->backtrace          = arm_backtrace;
-       ops->create_files       = op_arm_create_files;
-       ops->setup              = op_arm_setup;
-       ops->start              = op_arm_start;
-       ops->stop               = op_arm_stop;
-       ops->shutdown           = op_arm_stop;
-       ops->cpu_type           = op_name_from_perf_id(armpmu_get_pmu_id());
-
-       if (!ops->cpu_type)
-               ret = -ENODEV;
-       else
-               pr_info("oprofile: using %s\n", ops->cpu_type);
 
-       return ret;
+       return oprofile_perf_init(ops);
 }
 
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
 {
-       int cpu, id;
-       struct perf_event *event;
-
-       if (*perf_events) {
-               exit_driverfs();
-               for_each_possible_cpu(cpu) {
-                       for (id = 0; id < perf_num_counters; ++id) {
-                               event = perf_events[cpu][id];
-                               if (event != NULL)
-                                       perf_event_release_kernel(event);
-                       }
-                       kfree(perf_events[cpu]);
-               }
-       }
-
-       if (counter_config)
-               kfree(counter_config);
+       oprofile_perf_exit();
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -422,5 +126,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        pr_info("oprofile: hardware counters not available\n");
        return -ENODEV;
 }
-void oprofile_arch_exit(void) {}
+void __exit oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index ea3ca86c52836ba1ec9fe780da9c1629db91723f..aedf9c1d645e4a820c8f1f9dd884fdf5fd1cebf5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
                cr = readl(mtu_base + MTU_CR(1));
                writel(0, mtu_base + MTU_LR(1));
                writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-               writel(0x2, mtu_base + MTU_IMSC);
+               writel(1 << 1, mtu_base + MTU_IMSC);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
        unsigned long rate;
        struct clk *clk0;
-       struct clk *clk1;
-       u32 cr;
+       u32 cr = MTU_CRn_32BITS;
 
        clk0 = clk_get_sys("mtu0", NULL);
        BUG_ON(IS_ERR(clk0));
 
-       clk1 = clk_get_sys("mtu1", NULL);
-       BUG_ON(IS_ERR(clk1));
-
        clk_enable(clk0);
-       clk_enable(clk1);
 
        /*
-        * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-        * use a divide-by-16 counter if it's more than 16MHz
+        * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or
+        * 133 MHz for ux500.
+        * Use a divide-by-16 prescaler if the tick rate is above 32 MHz.
+        * At 32 MHz, the timer (with its 32-bit counter) can be programmed
+        * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
+        * by 16 would give too low a timer resolution.
         */
-       cr = MTU_CRn_32BITS;;
        rate = clk_get_rate(clk0);
-       if (rate > 16 << 20) {
+       if (rate > 32000000) {
                rate /= 16;
                cr |= MTU_CRn_PRESCALE_16;
        } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
                pr_err("timer: failed to initialize clock source %s\n",
                       nmdk_clksrc.name);
 
-       /* Timer 1 is used for events, fix according to rate */
-       cr = MTU_CRn_32BITS;
-       rate = clk_get_rate(clk1);
-       if (rate > 16 << 20) {
-               rate /= 16;
-               cr |= MTU_CRn_PRESCALE_16;
-       } else {
-               cr |= MTU_CRn_PRESCALE_1;
-       }
+       /* Timer 1 is used for events */
+
        clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
        writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
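
A quick sanity check of the numbers in the comment above (our arithmetic,
not part of the change): a 32-bit counter at 32 MHz wraps after
2^32 / 32 000 000 ≈ 134 s, and the clockevents mult/shift conversion keeps a
safety margin below that, which is presumably where the quoted ~127 s ceiling
comes from. Conversely, prescaling 2.4 MHz by 16 leaves 150 kHz, i.e. about
6.7 µs per tick, hence the prescaler is only engaged above 32 MHz.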
index e39a417a368dc92ad6776a68ef6fefc22a046b76..a92cb499313fdc9583890ebcc182ecae280cdc09 100644 (file)
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
 config OMAP_DEBUG_LEDS
        bool
        depends on OMAP_DEBUG_DEVICES
-       default y if LEDS
+       default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
        bool "Reset unused clocks during boot"
index a202a2ce6e3d0018ee3022ead0ba835527e3a4b0..6cd151b31bc5f7ee37afe54882737ddb60be1fbf 100644 (file)
@@ -320,6 +320,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
                if ((start <= da) && (da < start + bytes)) {
                        dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
                                __func__, start, da, bytes);
+                       iotlb_load_cr(obj, &cr);
                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
                }
        }
index e31496e35b0f452d4ff9e375855718fc0a078d40..0c8612fd831237164968b1f2120a1134618557e3 100644 (file)
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
                /* Writing zero to RSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
        } else {
-               complete(&mcbsp_rx->tx_irq_completion);
+               complete(&mcbsp_rx->rx_irq_completion);
        }
 
        return IRQ_HANDLED;
index 226b2e858d6c9617243a91138821fb0e355daf56..10b3b4c63372f406e6ee206ce691977753017e56 100644 (file)
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
        if (omap_sram_size == 0)
                return;
 
-       if (cpu_is_omap24xx()) {
-               omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-               base = OMAP2_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-
        if (cpu_is_omap34xx()) {
-               omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-               base = OMAP3_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
                /*
                 * SRAM must be marked as non-cached on OMAP3 since the
                 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
                omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
        }
 
-       if (cpu_is_omap44xx()) {
-               omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-               base = OMAP4_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-       omap_sram_io_desc[0].length = 1024 * 1024;      /* Use section desc */
+       omap_sram_io_desc[0].virtual = omap_sram_base;
+       base = omap_sram_start;
+       base = ROUND_DOWN(base, PAGE_SIZE);
+       omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+       omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
        iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
        printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index d3f1a9b5d2b5a4d01f040ba784a13185629b5938..608770fc1531335967f4e42a4c9355ed8e5f6c87 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc0_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC0,
-               .end    = S5P_PA_FIMC0 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC0 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
        },
 };
 
+static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc0 = {
        .name           = "s5p-fimc",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(s5p_fimc0_resource),
        .resource       = s5p_fimc0_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc0_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
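
The identical pattern is added for fimc1 and fimc2 in the next two hunks. The
mask must be a real object because the DMA API dereferences dev->dma_mask
rather than storing a value, so each platform device needs a writable u64
behind the pointer. A condensed sketch of the idiom (helper name
hypothetical; the diff gives each device its own static mask):

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	static u64 fimc_dma_mask = DMA_BIT_MASK(32);

	static void fimc_init_dma_mask(struct platform_device *pdev)
	{
		/* dma_mask is a pointer so the DMA core can update it in place */
		pdev->dev.dma_mask = &fimc_dma_mask;
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}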
index 41bd6986d0ad03210cf85c4e32f1f497a29d67f6..76e3a97a87d37c934f95da3ab2187831b9eb6da6 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc1_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC1,
-               .end    = S5P_PA_FIMC1 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC1 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
        },
 };
 
+static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc1 = {
        .name           = "s5p-fimc",
        .id             = 1,
        .num_resources  = ARRAY_SIZE(s5p_fimc1_resource),
        .resource       = s5p_fimc1_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc1_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
index dfddeda6d4a373445abdabbbe3e59155c61fa84e..24d29816fa2c03711ad0b5a62e2561caabd11e3b 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc2_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC2,
-               .end    = S5P_PA_FIMC2 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC2 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
        },
 };
 
+static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc2 = {
        .name           = "s5p-fimc",
        .id             = 2,
        .num_resources  = ARRAY_SIZE(s5p_fimc2_resource),
        .resource       = s5p_fimc2_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc2_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
index 04d9521ddc9f0e652d62453a99bbb19fa877c7fa..e8f2be2d67f2cac33961487b47a7880b68f33832 100644 (file)
@@ -435,7 +435,6 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
 static int s3c_adc_resume(struct platform_device *pdev)
 {
        struct adc_device *adc = platform_get_drvdata(pdev);
-       unsigned long flags;
 
        clk_enable(adc->clk);
        enable_irq(adc->irq);
index 90a20512d68d5a40d9fa789b15c24620a0c8f9b3..e8d20b0bc50e11ee43d78f230b811409def1cb87 100644 (file)
@@ -48,6 +48,9 @@
 #include <plat/clock.h>
 #include <plat/cpu.h>
 
+#include <linux/serial_core.h>
+#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
+
 /* clock information */
 
 static LIST_HEAD(clocks);
@@ -65,6 +68,28 @@ static int clk_null_enable(struct clk *clk, int enable)
        return 0;
 }
 
+static int dev_is_s3c_uart(struct device *dev)
+{
+       struct platform_device **pdev = s3c24xx_uart_devs;
+       int i;
+       for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
+               if (*pdev && dev == &(*pdev)->dev)
+                       return 1;
+       return 0;
+}
+
+/*
+ * Serial drivers call get_clock() very early, before the platform bus
+ * has been set up; this requires a special check to let them get
+ * a proper clock.
+ */
+
+static int dev_is_platform_device(struct device *dev)
+{
+       return dev->bus == &platform_bus_type ||
+              (dev->bus == NULL && dev_is_s3c_uart(dev));
+}
+
 /* Clock API calls */
 
 struct clk *clk_get(struct device *dev, const char *id)
@@ -73,7 +98,7 @@ struct clk *clk_get(struct device *dev, const char *id)
        struct clk *clk = ERR_PTR(-ENOENT);
        int idno;
 
-       if (dev == NULL || dev->bus != &platform_bus_type)
+       if (dev == NULL || !dev_is_platform_device(dev))
                idno = -1;
        else
                idno = to_platform_device(dev)->id;
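
A sketch of the case the check covers (the call site and clock id are
illustrative): during early console bring-up the static uart
platform_devices exist but platform_add_devices() has not run yet, so
dev->bus is still NULL and only the dev_is_s3c_uart() half of the check lets
the lookup through:

	/* Hypothetical early serial path, before platform bus registration: */
	struct clk *uclk = clk_get(&s3c24xx_uart_devs[0]->dev, "uart");
	if (!IS_ERR(uclk))
		clk_enable(uclk);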
index 57b68a50f45e93aa14852500ace0df1fdc48da8b..e3d41eaed1ffd9feed571cef0af6bf5bf0841bf2 100644 (file)
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
        if (!chip)
                return -EINVAL;
 
-       off = chip->chip.base - pin;
+       off = pin - chip->chip.base;
        shift = off * 2;
        reg = chip->base + 0x0C;
 
        drvstr = __raw_readl(reg);
-       drvstr = 0xffff & (0x3 << shift);
        drvstr = drvstr >> shift;
+       drvstr &= 0x3;
 
        return (__force s5p_gpio_drvstr_t)drvstr;
 }
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
        if (!chip)
                return -EINVAL;
 
-       off = chip->chip.base - pin;
+       off = pin - chip->chip.base;
        shift = off * 2;
        reg = chip->base + 0x0C;
 
        tmp = __raw_readl(reg);
+       tmp &= ~(0x3 << shift);
        tmp |= drvstr << shift;
 
        __raw_writel(tmp, reg);
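
Both drvstr fixes above are two-bit-field arithmetic: the offset must be pin minus base (the original subtraction was reversed), the read path must shift the register value before masking, and the write path must clear the old field before ORing in the new value. A worked example under assumed numbers: with chip.base = 32 and pin = 35, off = 3 and shift = 6, so bits 7:6 of the register hold the field. A standalone sketch of the corrected pattern:

/* Sketch: read-modify-write of a 2-bit field at (pin - base) * 2. */
static unsigned int drvstr_read_sketch(unsigned int reg_val,
				       unsigned int pin, unsigned int base)
{
	unsigned int shift = (pin - base) * 2;

	return (reg_val >> shift) & 0x3;	/* mask after shifting */
}

static unsigned int drvstr_write_sketch(unsigned int reg_val,
					unsigned int pin, unsigned int base,
					unsigned int drvstr)
{
	unsigned int shift = (pin - base) * 2;

	reg_val &= ~(0x3 << shift);		/* clear the old value */
	reg_val |= (drvstr & 0x3) << shift;
	return reg_val;
}
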
index db4112c6f2becd7371fc86428c5ca4b50a07bbdd..1c6b92947c5db7e8b955ac3831214bbf556bf2bb 100644 (file)
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
 /* Define values for the drvstr available for each gpio pin.
  *
  * These values control the value of the output signal driver strength,
- * configurable on most pins on the S5C series.
+ * configurable on most pins on the S5P series.
  */
-#define S5P_GPIO_DRVSTR_LV1    ((__force s5p_gpio_drvstr_t)0x00)
-#define S5P_GPIO_DRVSTR_LV2    ((__force s5p_gpio_drvstr_t)0x01)
-#define S5P_GPIO_DRVSTR_LV3    ((__force s5p_gpio_drvstr_t)0x10)
-#define S5P_GPIO_DRVSTR_LV4    ((__force s5p_gpio_drvstr_t)0x11)
+#define S5P_GPIO_DRVSTR_LV1    ((__force s5p_gpio_drvstr_t)0x0)
+#define S5P_GPIO_DRVSTR_LV2    ((__force s5p_gpio_drvstr_t)0x2)
+#define S5P_GPIO_DRVSTR_LV3    ((__force s5p_gpio_drvstr_t)0x1)
+#define S5P_GPIO_DRVSTR_LV4    ((__force s5p_gpio_drvstr_t)0x3)
 
 /**
 * s5p_gpio_get_drvstr() - get the driver strength value of a gpio pin
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index 98f94d041d9c1dd212a0519efac60def8034b7db..a727f54d64d6e633d58ae2836bbbfae4c82f0017 100644 (file)
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        vfree(module->arch.syminfo);
        module->arch.syminfo = NULL;
 
-       return module_bug_finalize(hdr, sechdrs, module);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *module)
 {
-       module_bug_cleanup(module);
 }
index 16399bd249930c1a9e2e06f7d0da247b288b6d34..0f2417df63230fb90114d96e7cb5f583d48fab34 100644 (file)
@@ -7,6 +7,7 @@ config FRV
        default y
        select HAVE_IDE
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
 
 config ZONE_DMA
index 0974c0ecc594817ee9d8067aeba0eee7e3379dbf..bab01298b58ee2873fd7ef85de9230ac7af354f7 100644 (file)
@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
        struct user_context *user = current->thread.user;
        unsigned long tbr, psr;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        tbr = user->i.tbr;
        psr = user->i.psr;
        if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
        struct sigframe __user *frame;
        int rsig;
 
+       set_fs(USER_DS);
+
        frame = get_sigframe(ka, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
                                   (unsigned long) (frame->retcode + 2));
        }
 
-       /* set up registers for signal handler */
-       __frame->sp   = (unsigned long) frame;
-       __frame->lr   = (unsigned long) &frame->retcode;
-       __frame->gr8  = sig;
-
+       /* Set up registers for the signal handler */
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-               __get_user(__frame->pc, &funcptr->text);
-               __get_user(__frame->gr15, &funcptr->GOT);
+               struct fdpic_func_descriptor desc;
+               if (copy_from_user(&desc, funcptr, sizeof(desc)))
+                       goto give_sigsegv;
+               __frame->pc = desc.text;
+               __frame->gr15 = desc.GOT;
        } else {
                __frame->pc   = (unsigned long) ka->sa.sa_handler;
                __frame->gr15 = 0;
        }
 
-       set_fs(USER_DS);
+       __frame->sp   = (unsigned long) frame;
+       __frame->lr   = (unsigned long) &frame->retcode;
+       __frame->gr8  = sig;
 
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 
 } /* end setup_frame() */
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        struct rt_sigframe __user *frame;
        int rsig;
 
+       set_fs(USER_DS);
+
        frame = get_sigframe(ka, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        }
 
        /* Set up registers for signal handler */
-       __frame->sp  = (unsigned long) frame;
-       __frame->lr   = (unsigned long) &frame->retcode;
-       __frame->gr8 = sig;
-       __frame->gr9 = (unsigned long) &frame->info;
-
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-               __get_user(__frame->pc, &funcptr->text);
-               __get_user(__frame->gr15, &funcptr->GOT);
+               struct fdpic_func_descriptor desc;
+               if (copy_from_user(&desc, funcptr, sizeof(desc)))
+                       goto give_sigsegv;
+               __frame->pc = desc.text;
+               __frame->gr15 = desc.GOT;
        } else {
                __frame->pc   = (unsigned long) ka->sa.sa_handler;
                __frame->gr15 = 0;
        }
 
-       set_fs(USER_DS);
+       __frame->sp  = (unsigned long) frame;
+       __frame->lr  = (unsigned long) &frame->retcode;
+       __frame->gr8 = sig;
+       __frame->gr9 = (unsigned long) &frame->info;
 
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 
 } /* end setup_rt_frame() */
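
The same hardening appears in setup_frame() and setup_rt_frame(): the two unchecked __get_user() loads of the FDPIC function descriptor become one checked copy_from_user(), so a faulting handler pointer now takes the give_sigsegv path instead of leaving pc/gr15 half-initialised; moving set_fs(USER_DS) to the top of each function makes the subsequent user accesses verify against user-space limits, and force_sigsegv() resets the handler when the failing signal is itself SIGSEGV. A condensed sketch of the descriptor-fetch pattern (the helper name is illustrative):

/* Sketch: fetch a user-space FDPIC function descriptor all-or-nothing;
 * any fault fails the whole delivery rather than using partial data. */
static int fetch_fdpic_desc_sketch(
	const struct fdpic_func_descriptor __user *uptr,
	unsigned long *pc, unsigned long *got)
{
	struct fdpic_func_descriptor desc;

	if (copy_from_user(&desc, uptr, sizeof(desc)))
		return -EFAULT;		/* caller raises SIGSEGV */

	*pc = desc.text;
	*got = desc.GOT;
	return 0;
}
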
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        int ret;
 
        /* Are we from a system call? */
-       if (in_syscall(__frame)) {
+       if (__frame->syscallno != -1) {
                /* If so, check system call restarting.. */
                switch (__frame->gr8) {
                case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
                        __frame->gr8 = __frame->orig_gr8;
                        __frame->pc -= 4;
                }
+               __frame->syscallno = -1;
        }
 
        /* Set up the stack frame */
@@ -538,10 +548,11 @@ no_signal:
                        break;
 
                case -ERESTART_RESTARTBLOCK:
-                       __frame->gr8 = __NR_restart_syscall;
+                       __frame->gr7 = __NR_restart_syscall;
                        __frame->pc -= 4;
                        break;
                }
+               __frame->syscallno = -1;
        }
 
        /* if there's no signal to deliver, we just put the saved sigmask
index f4709756d0d9da2046c89fb12d5af54aa032492f..4ff2fb1e6b1694848eb688700e8be330a41a9c51 100644 (file)
@@ -5,4 +5,4 @@
 lib-y := \
        __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
        checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-       outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+       outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
index 0865e291c20d2948c95edc70f52925121409599d..db4953dc4e1b445adbdd7e004a68890a5c1c07a6 100644 (file)
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index f90edc85b50933a9df52391d041ad9f8b1a8ee66..9301a2821615b66cacd781eeba3e53d57e60fe75 100644 (file)
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
 }
 
 static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
index d514cd9edb49f45c9ac5871d407ec7ff65b0927e..8fb7d33a661f9e6de23ec01be699e4300ad4f363 100644 (file)
@@ -6,12 +6,6 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-
 /*
  * No irq_cpustat_t for IA-64.  The data is held in the per-CPU data structure.
  */
 
 #define local_softirq_pending()                (local_cpu_data->softirq_pending)
 
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
index 9f342a574ce8c669310b6857b5b1ac7152f8bae0..dd028f2b13b39be8d2510d87d9531743b76a481f 100644 (file)
@@ -272,10 +272,6 @@ void cpu_idle_wait(void);
 
 void default_idle(void);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
index 3567d54f8cee7533ecba41847c5f9957d9481296..331d42bda77ae97f457b13f970aa83c40e11d4b0 100644 (file)
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33])                   // r14 <- *set
        ;;
 
        RSM_PSR_I(p0, r18, r19)                 // mask interrupt delivery
-       mov ar.ccv=0
        andcm r14=r14,r17                       // filter out SIGKILL & SIGSTOP
+       mov r8=EINVAL                   // default to EINVAL
 
 #ifdef CONFIG_SMP
-       mov r17=1
+       // __ticket_spin_trylock(r31)
+       ld4 r17=[r31]
        ;;
-       cmpxchg4.acq r18=[r31],r17,ar.ccv       // try to acquire the lock
-       mov r8=EINVAL                   // default to EINVAL
+       mov.m ar.ccv=r17
+       extr.u r9=r17,17,15
+       adds r19=1,r17
+       extr.u r18=r17,0,15
+       ;;
+       cmp.eq p6,p7=r9,r18
        ;;
+(p6)   cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6)   dep.z r20=r19,1,15              // next serving ticket for unlock
+(p7)   br.cond.spnt.many .lock_contention
+       ;;
+       cmp4.eq p0,p7=r9,r17
+       adds r31=2,r31
+(p7)   br.cond.spnt.many .lock_contention
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
-       cmp4.ne p6,p0=r18,r0
-(p6)   br.cond.spnt.many .lock_contention
        ;;
 #else
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
-       mov r8=EINVAL                   // default to EINVAL
 #endif
        add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
        add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33])                     // r14 <- *set
 (p6)   br.cond.spnt.few 1b                     // yes -> retry
 
 #ifdef CONFIG_SMP
-       st4.rel [r31]=r0                        // release the lock
+       // __ticket_spin_unlock(r31)
+       st2.rel [r31]=r20
+       mov r20=0                                       // i must not leak kernel bits...
 #endif
        SSM_PSR_I(p0, p9, r31)
        ;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 
 .sig_pending:
 #ifdef CONFIG_SMP
-       st4.rel [r31]=r0                        // release the lock
+       // __ticket_spin_unlock(r31)
+       st2.rel [r31]=r20                       // release the lock
 #endif
        SSM_PSR_I(p0, p9, r17)
        ;;
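
The hunks above convert an open-coded cmpxchg spin-lock acquire/release around current->blocked into the ticket-lock protocol, keeping this hand-written fast path compatible with ia64's ticket spinlocks. A hedged C rendering of the trylock the assembly mirrors, with the field layout read off the extr.u operands (next-ticket in bits 0..14, now-serving in bits 17..31):

/* Sketch of the ticket trylock the assembly above open-codes.
 * cmpxchg_acq() stands for ia64's acquire-ordered cmpxchg. */
#define TICKET_SHIFT	17
#define TICKET_MASK	((1 << 15) - 1)

static int ticket_trylock_sketch(volatile int *lock)
{
	int tmp = *lock;

	/* the lock is free iff next-ticket == now-serving */
	if ((((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0)
		return 0;			/* contended */

	/* take our ticket by bumping the low counter */
	return cmpxchg_acq(lock, tmp, tmp + 1) == tmp;
}

The unlock side stores the incremented now-serving value with a 2-byte release store at byte offset 2 of the lock word, which is why the assembly adds 2 to r31 and uses st2.rel.
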
index 2f85412ef7302a0aedaf1b5e26f63719c69dc033..b8da7d0574d20635f489315ea8caf957d063be08 100644 (file)
@@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t;
  * These are used to set parameters in the core dumps.
  */
 #define ELF_CLASS      ELFCLASS32
-#if defined(__LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN__)
 #define ELF_DATA       ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
+#elif defined(__BIG_ENDIAN__)
 #define ELF_DATA       ELFDATA2MSB
 #else
 #error no endian defined
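
The endian test above is subtle: __LITTLE_ENDIAN and __BIG_ENDIAN are constants defined by the linux/byteorder headers (1234 and 4321), so whether defined() sees them depends on include order rather than on the target's byte order, while __LITTLE_ENDIAN__ and __BIG_ENDIAN__ are predefined by the compiler for the actual target. A self-contained sketch of the reliable form (MY_ELF_DATA is an illustrative name):

#include <linux/elf.h>	/* ELFDATA2LSB / ELFDATA2MSB */

/* Sketch: key the ELF data encoding off the compiler's target-order
 * builtins, never off the byteorder-header constants. */
#if defined(__BIG_ENDIAN__)
# define MY_ELF_DATA	ELFDATA2MSB
#elif defined(__LITTLE_ENDIAN__)
# define MY_ELF_DATA	ELFDATA2LSB
#else
# error "compiler did not advertise a byte order"
#endif
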
index 9c1acb2b1a928984c8f62ce4a5cd91a25329dc21..b2eeb0de1c8d337a6d7ab5abef363b4797ab7e9a 100644 (file)
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)    do { } while (0)
 
index 76125777483ccda07d9d31d55c2f8f3b3201ea05..c70545689da83ef2ffaef987b2c3374fadca04fc 100644 (file)
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index 403869833b98fe6c95fd360d80cd762f3a5008e2..225412bc227e690bcb313743dd16f58fca5c4115 100644 (file)
@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:                                ; deal with pending signals and
                                        ; notify-resume requests
        mv      r0, sp                  ; arg1 : struct pt_regs *regs
-       ldi     r1, #0                  ; arg2 : sigset_t *oldset
-       mv      r2, r9                  ; arg3 : __u32 thread_info_flags
+       mv      r1, r9                  ; arg2 : __u32 thread_info_flags
        bl      do_notify_resume
-       bra     restore_all
+       bra     resume_userspace
 
        ; perform syscall exit tracing
        ALIGN
index e555091eb97cbcf8bbe261851be3b72e90e6075e..0021ade4cba8c86bf1d2fd348b283d8cac591955 100644 (file)
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               break;
+               return -EIO;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               break;
+               return -EIO;
 
        if (embed_debug_trap(child, next_pc))
-               break;
+               return -EIO;
 
        invalidate_cache();
+       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index 144b0f124fc72f08b20f93336f96da81327fe61c..a08697f0886d7988b012fa727c777d8bbed7b3fb 100644 (file)
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-                 unsigned long r2, unsigned long r3, unsigned long r4,
-                 unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-       sigset_t newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +189,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                           sigset_t *set, struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
@@ -275,22 +246,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                current->comm, current->pid, frame, regs->pc);
 #endif
 
-       return;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
+       return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+       u16 inst;
+       if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+               return -EFAULT;
+       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
+               regs->bpc -= 2;
+       else
+               regs->bpc -= 4;
+       regs->syscall_nr = -1;
+       return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
 {
-       unsigned short inst;
-
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
                /* If so, check system call restarting.. */
@@ -308,16 +291,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        /* fallthrough */
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
-                               inst = *(unsigned short *)(regs->bpc - 2);
-                               if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                                       regs->bpc -= 2;
-                               else
-                                       regs->bpc -= 4;
+                               if (prev_insn(regs) < 0)
+                                       return -EFAULT;
                }
        }
 
        /* Set up the stack frame */
-       setup_rt_frame(sig, ka, info, oldset, regs);
+       if (setup_rt_frame(sig, ka, info, oldset, regs))
+               return -EFAULT;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +306,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+       return 0;
 }
 
 /*
@@ -332,12 +314,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       unsigned short inst;
+       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -346,12 +328,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
         * if so.
         */
        if (!user_mode(regs))
-               return 1;
+               return;
 
        if (try_to_freeze()) 
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +347,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
-               return 1;
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               return;
        }
 
  no_signal:
@@ -375,31 +361,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                    regs->r0 == -ERESTARTSYS ||
                    regs->r0 == -ERESTARTNOINTR) {
                        regs->r0 = regs->orig_r0;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
-               }
-               if (regs->r0 == -ERESTART_RESTARTBLOCK){
+                       prev_insn(regs);
+               } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
                        regs->r0 = regs->orig_r0;
                        regs->r7 = __NR_restart_syscall;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
+                       prev_insn(regs);
                }
        }
-       return 0;
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-                     __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
        /* Pending single-step? */
        if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +386,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
+               do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
index 60b15d0aa07290bfd2494423ecc4c331c33c3032..b43b36beafe37aed63baf7714a92ba55a7cf2f1c 100644 (file)
 #define __NR_set_thread_area   334
 #define __NR_atomic_cmpxchg_32 335
 #define __NR_atomic_barrier    336
+#define __NR_fanotify_init     337
+#define __NR_fanotify_mark     338
+#define __NR_prlimit64         339
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            337
+#define NR_syscalls            340
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 2391bdff09962e7a51d36c7842290ea90932ca54..6360c437dcf51c5491e192151b95ce6c1a6923a2 100644 (file)
@@ -765,4 +765,7 @@ sys_call_table:
        .long sys_set_thread_area
        .long sys_atomic_cmpxchg_32     /* 335 */
        .long sys_atomic_barrier
+       .long sys_fanotify_init
+       .long sys_fanotify_mark
+       .long sys_prlimit64
 
index 8f0640847ad2bf7bf99d0a184ed10ce8272a84f8..05285d08e54767a71a814773c23a506386f9626a 100644 (file)
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
 void mac_mksound( unsigned int freq, unsigned int length )
 {
        __u32 cfreq = ( freq << 5 ) / 468;
-       __u32 flags;
+       unsigned long flags;
        int i;
 
        if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-       __u32 flags;
+       unsigned long flags;
 
        /* if the bell is already ringing, ring longer */
        if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
        int     i, count = mac_asc_samplespersec / HZ;
-       __u32 flags;
+       unsigned long flags;
 
        /*
         * we neither want a sound buffer overflow nor underflow, so we need to match
index b30b3eb197a5d2c157d6e96f8d60de45395d4a21..79b1ed198c070dd40dbb3cf4f03dd6f24b79cb71 100644 (file)
@@ -355,6 +355,9 @@ ENTRY(sys_call_table)
        .long sys_set_thread_area
        .long sys_atomic_cmpxchg_32     /* 335 */
        .long sys_atomic_barrier
+       .long sys_fanotify_init
+       .long sys_fanotify_mark
+       .long sys_prlimit64
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index e322d65f33a41e0085e5d352410ca0b5c3f37e26..7dd65cfae83759562e43ab20bb03be071ae56ce5 100644 (file)
@@ -7,6 +7,10 @@ subdir-ccflags-y := -Werror
 include arch/mips/Kbuild.platforms
 obj-y := $(platform-y)
 
+# make clean traverses $(obj-) without having included .config, so
+# everything ends up here
+obj- := $(platform-)
+
 # mips object files
 # The object files are linked as core-y files would be linked
 
index 3ad59dde485209bce858c425e4fc8a2f64b04c09..4c9f402295dd3d9b11548ab47d26853761df62f1 100644 (file)
@@ -13,6 +13,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select RTC_LIB if !MACH_LOONGSON
+       select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
 
@@ -880,11 +881,15 @@ config NO_IOPORT
 config GENERIC_ISA_DMA
        bool
        select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
+       select ISA_DMA_API
 
 config GENERIC_ISA_DMA_SUPPORT_BROKEN
        bool
        select GENERIC_ISA_DMA
 
+config ISA_DMA_API
+       bool
+
 config GENERIC_GPIO
        bool
 
@@ -1646,8 +1651,16 @@ config MIPS_MT_SMP
        select SYS_SUPPORTS_SMP
        select SMP_UP
        help
-         This is a kernel model which is also known a VSMP or lately
-         has been marketesed into SMVP.
+         This is a kernel model which is known as VSMP but lately has been
+         marketed as SMVP.
+         Virtual SMP uses the processor's VPEs to implement virtual
+         processors. In the currently available configuration of the 34K
+         processor this allows for a dual-processor system. Both processors
+         will share the same primary caches; each will obtain half of the
+         TLB for its own exclusive use. For a layman this model can be
+         described as similar to what Intel calls Hyper-Threading.
+
+         For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
        bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1677,14 @@ config MIPS_MT_SMTC
        help
          This is a kernel model which is known as SMTC or, lately, has been
          marketed as SMVP.
+         It presents the available TCs of the core as processors to Linux.
+         On currently available 34K processors this means a Linux system will
+         see up to 5 processors. The implementation of the SMTC kernel differs
+         significantly from VSMP and the two cannot efficiently coexist in the
+         same kernel binary, so the choice between VSMP and SMTC is a
+         compile-time decision.
+
+         For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
 
index c29511b11d44fd6732b0dd153d009dbd7051555d..5340210596297fa54c8723e866aebaeb8c20269e 100644 (file)
@@ -43,7 +43,7 @@ int prom_argc;
 char **prom_argv;
 char **prom_envp;
 
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
 {
        int i;
 
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
        }
 }
 
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
 {
        char *ethaddr_str;
 
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
 
        return 0;
 }
-EXPORT_SYMBOL(prom_get_ethernet_addr);
 
 void __init prom_free_prom_memory(void)
 {
index ed9bb709c9a3816a4738d0ceef91db8c8de20cd4..5042d51b0512a087b02b85853aad2d2b38d9d7d6 100644 (file)
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 hostprogs-y := calc_vmlinuz_load_addr
 
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-               $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+               $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
@@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
 vmlinuz.srec: vmlinuz
        $(call cmd,objcopy)
 
-clean-files := $(objtree)/vmlinuz.*
+clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
index 094c17e38e163ab691b71bef78710f73848c7645..47323ca452dcbde751536c58c9f613dd1d9d51c8 100644 (file)
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_STATIC
        depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+       def_bool y
+       depends on OCTEON_ETHERNET || PCI
index c664c8cc2b42cb8970b9f57531a03e2998075566..a5b427909b5cac04d28c4da1b099342ee72df4ce 100644 (file)
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;               /* Let default notifier send signals */
 }
 
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
 {
        return cu2_notifier(cnmips_cu2_call, 0);
 }
index 2fd66db6939e0f981c338015127c69788d960843..7f41c5be2190ddca03fc92a00e8f21bd735414f5 100644 (file)
@@ -11,4 +11,4 @@
 
 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
 
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
index 3adbcbd95db1efd4dbb18fba91bc978153079d7f..cf55a6f4e720c4e831733ceab40d820fa136a2c8 100644 (file)
@@ -1,7 +1,7 @@
 #
 # DECstation family
 #
-platform-$(CONFIG_MACH_DECSTATION)     = dec/
+platform-$(CONFIG_MACH_DECSTATION)     += dec/
 cflags-$(CONFIG_MACH_DECSTATION)       += \
                        -I$(srctree)/arch/mips/include/asm/mach-dec
 libs-$(CONFIG_MACH_DECSTATION)         += arch/mips/dec/prom/
index c63c56bfd18461b558e0cba7380d6297fc47115e..47d87da379f947c8a578f2a2b84e7da804f559a8 100644 (file)
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
 #endif /* CONFIG_64BIT */
 
 /*
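
With the Kconfig hunk earlier selecting GENERIC_ATOMIC64 if !64BIT, this include gives 32-bit MIPS builds a spinlock-backed atomic64_t from asm-generic, so 64-bit counters compile even without 64-bit LL/SC instructions. A hedged usage sketch (the counter is illustrative, not from this diff):

#include <asm/atomic.h>

/* Sketch: a 64-bit statistics counter that now also builds on
 * 32-bit MIPS via the asm-generic fallback. */
static atomic64_t rx_bytes_sketch = ATOMIC64_INIT(0);

static void account_rx_sketch(long len)
{
	atomic64_add(len, &rx_bytes_sketch);
}

static long long rx_total_sketch(void)
{
	return atomic64_read(&rx_bytes_sketch);
}
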
index 613f6912dfc1d024b2aa6b28fb4215ba8b251552..dbc51065df5b3fe53611ff15e9cc259188ff0dbc 100644 (file)
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = (struct pt_regs *)
                ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
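
This rename (mirrored in the ia64 hunk earlier) frees the compat_alloc_user_space name for a generic checking wrapper around the arch hook; a hedged sketch of what such a wrapper looks like, reconstructed from the direction of the change rather than from this diff:

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Sketch: length-check and access_ok()-verify the arch-provided
 * compat scratch area before handing it out. */
void __user *compat_alloc_user_space_sketch(unsigned long len)
{
	void __user *ptr;

	if (len > (((compat_uptr_t)~0) >> 1))	/* absurd length */
		return NULL;

	ptr = arch_compat_alloc_user_space(len);
	if (!access_ok(VERIFY_WRITE, ptr, len))
		return NULL;

	return ptr;
}
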
index 2cb2f0c2c4f89342ae5256a79d8082f01af4d319..3532e2c5f098ae46a4a79f7455cd004d699dd4a9 100644 (file)
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
 
 #define cu2_notifier(fn, pri)                                          \
 ({                                                                     \
-       static struct notifier_block fn##_nb __cpuinitdata = {          \
+       static struct notifier_block fn##_nb = {                        \
                .notifier_call = fn,                                    \
                .priority = pri                                         \
        };                                                              \
index e482fe90fe8850609ed05f79d4caa9423e31031a..75eddedcfc3ee31a5ba500089fb92215311f51eb 100644 (file)
@@ -56,6 +56,7 @@
  */
 
 #ifdef CONFIG_32BIT
+#include <linux/types.h>
 
 struct flock {
        short   l_type;
index 9b9436a4d816bfe7f26a1eb8a7e7cb677507c388..86548da650e765f79db345f4d3964a5d0eb6c0fe 100644 (file)
@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
  */
 struct gic_intr_map {
        unsigned int cpunum;    /* Directed to this CPU */
+#define GIC_UNUSED             0xdead                  /* Dummy data */
        unsigned int pin;       /* Directed to this Pin */
        unsigned int polarity;  /* Polarity : +/-       */
        unsigned int trigtype;  /* Trigger  : Edge/Levl */
index b74caf65482b2e068b86d2763daa81733bc45503..ff9a8b86cb9363c1fb458546c56550fc35a7a3ab 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef __ASM_MACH_TX49XX_KMALLOC_H
 #define __ASM_MACH_TX49XX_KMALLOC_H
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
index cea872fc6f5c0d1ae92f00f920f7f991c8b9b806..d11aa02a956a57ca41ff890dbe16bbdd0145db53 100644 (file)
@@ -88,9 +88,6 @@
 
 #define GIC_EXT_INTR(x)                x
 
-/* Dummy data */
-#define X                      0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0  16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0  17
index a16beafcea91dd091f0b491ea572b5902e272a3d..e59cd1ac09c2f82eb8c91af1bdb0b6a520513d89 100644 (file)
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this
+ * workaround until GCC 3.x has been retired; only then can we apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
index 96e28f18dad11fbc0dabf41e4fbaf25f1302f3b0..1ca64b4d33d96844da375d3df443246f781e564a 100644 (file)
@@ -88,6 +88,7 @@ typedef struct siginfo {
 #ifdef __ARCH_SI_TRAPNO
                        int _trapno;    /* TRAP # which caused the signal */
 #endif
+                       short _addr_lsb;
                } _sigfault;
 
                /* SIGPOLL, SIGXFSZ (To do ...)  */
index 2376f2e06e470a264eeff5115dc692c80e9f37c3..70df9c0d3c5be20e2d7b646460276a44374fc077 100644 (file)
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK         (0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK         (0x0000ffef &                           \
+                                       ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (0x8000ffff & ~_TIF_SECCOMP)
 
index baa318a59c97f8c2791842df809c1218796c7492..550725b881d5edec666a5b32bbe1200164ac7fd8 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 333)
 #define __NR_accept4                   (__NR_Linux + 334)
 #define __NR_recvmmsg                  (__NR_Linux + 335)
+#define __NR_fanotify_init             (__NR_Linux + 336)
+#define __NR_fanotify_mark             (__NR_Linux + 337)
+#define __NR_prlimit64                 (__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            335
+#define __NR_Linux_syscalls            338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                335
+#define __NR_O32_Linux_syscalls                338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_perf_event_open           (__NR_Linux + 292)
 #define __NR_accept4                   (__NR_Linux + 293)
 #define __NR_recvmmsg                  (__NR_Linux + 294)
+#define __NR_fanotify_init             (__NR_Linux + 295)
+#define __NR_fanotify_mark             (__NR_Linux + 296)
+#define __NR_prlimit64                 (__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            294
+#define __NR_Linux_syscalls            297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         294
+#define __NR_64_Linux_syscalls         297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
 #define __NR_getdents64                        (__NR_Linux + 299)
+#define __NR_fanotify_init             (__NR_Linux + 300)
+#define __NR_fanotify_mark             (__NR_Linux + 301)
+#define __NR_prlimit64                 (__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            299
+#define __NR_Linux_syscalls            302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                299
+#define __NR_N32_Linux_syscalls                302
 
 #ifdef __KERNEL__
 
index 6a97230e3d05ee4a53478c2a4625c51d92b26bd2..ba91be9c21ef405f65e0ae87fafe92a66ac8b06f 100644 (file)
@@ -1,3 +1,3 @@
-core-$(CONFIG_MACH_JZ4740)     += arch/mips/jz4740/
+platform-$(CONFIG_MACH_JZ4740) += jz4740/
 cflags-$(CONFIG_MACH_JZ4740)   += -I$(srctree)/arch/mips/include/asm/mach-jz4740
 load-$(CONFIG_MACH_JZ4740)     += 0xffffffff80010000
index 0176ed015c895644bc72fc30bc661c2ad555b515..32103cc2a2576877592d91050b86c8944a2479d9 100644 (file)
@@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs)
                return -EFAULT;
        }
 
-       regs->regs[0] = 0;
        switch (insn.i_format.opcode) {
        /*
         * jr and jalr are in r_format format.
index b181f2f0ea8e71709f331c8ee32a7f6792999106..82ba9f62f49e3b2faa98abc1f6da2fe1e062a4ed 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        int             i;
 
        irq -= _irqbase;
-       pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+       pr_debug("%s(%d) called\n", __func__, irq);
        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
        /* Setup specifics */
        for (i = 0; i < mapsize; i++) {
                cpu = intrmap[i].cpunum;
-               if (cpu == X)
+               if (cpu == GIC_UNUSED)
                        continue;
                if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
                        continue;
index 1f4e2fa64140ee8204aed74ecf82eba7bab056be..f4546e97c60db111215495f924aa567595c39581 100644 (file)
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-       /* Userpace events, ignore. */
+       /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;
 
index 80e2ba694babcd0d70bd8266a6be941996e2a8a8..29811f043399588604da9bbc00efd9f0997aa530 100644 (file)
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
                memset(&tz, 0, sizeof(tz));
                if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
                                             (int)&tz, 0, 0)) == 0)
-               ret.retval = tv.tv_sec;
+                       ret.retval = tv.tv_sec;
                break;
 
        case MTSP_SYSCALL_EXIT:
index c2dab140dc98fb1588259063699c7ba09b6f8ed7..6343b4a5b8350cb3a93edea5d75f3154cde48343 100644 (file)
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 {
        return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
 }
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+                                dfd, pathname);
+}
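
The new sys_32_fanotify_mark wrapper exists because fanotify_mark() takes a 64-bit mask: under o32 a u64 argument arrives as two 32-bit register halves, and merge_64() glues them back together, with the half order depending on endianness. A hedged sketch of the same pattern for a hypothetical syscall taking a 64-bit offset (the macro and the example syscall are illustrative, not from this diff):

/* Sketch: reassemble a u64 split across two consecutive 32-bit o32
 * registers; on big-endian the high half arrives first. */
#ifdef __MIPSEB__
#define MERGE64_SKETCH(r1, r2)	(((u64)(u32)(r1) << 32) | (u32)(r2))
#else
#define MERGE64_SKETCH(r1, r2)	(((u64)(u32)(r2) << 32) | (u32)(r1))
#endif

SYSCALL_DEFINE4(32_example_read, int, fd, char __user *, buf,
		u32, off_a, u32, off_b)
{
	loff_t pos = MERGE64_SKETCH(off_a, off_b);

	return do_example_read(fd, buf, pos);	/* hypothetical target */
}
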
index 2340f11dc29cc8de689593c8b49315b613a6aae8..9a526ba6f25766f3ab6b58bc70b7f792c4011ff9 100644 (file)
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
        if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
                goto out_unlock;
 
-       retval = security_task_setscheduler(p, 0, NULL);
+       retval = security_task_setscheduler(p);
        if (retval)
                goto out_unlock;
 
index c51b95ff86443e2fcb1eed618afe0861fd781eef..c8777333e19833667fe882110fe40d954fee5eeb 100644 (file)
@@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
        /* do the secure computing check first */
        if (!entryexit)
-               secure_computing(regs->regs[0]);
+               secure_computing(regs->regs[2]);
 
        if (unlikely(current->audit_context) && entryexit)
                audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 out:
        if (unlikely(current->audit_context) && !entryexit)
-               audit_syscall_entry(audit_arch(), regs->regs[0],
+               audit_syscall_entry(audit_arch(), regs->regs[2],
                                    regs->regs[4], regs->regs[5],
                                    regs->regs[6], regs->regs[7]);
 }
index 17202bbe843f91172534dd30e41e8a9542e0d762..fbaabad0e6e28466aa1098d4f50c516d520b0c7d 100644 (file)
@@ -63,9 +63,9 @@ stack_done:
        sw      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       lw      t1, PT_R2(sp)           # syscall number
        negu    v0                      # error
-       sw      v0, PT_R0(sp)           # set flag for syscall
-                                       # restarting
+       sw      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sw      v0, PT_R2(sp)           # result
 
 o32_syscall_exit:
@@ -104,9 +104,9 @@ syscall_trace_entry:
        sw      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       lw      t1, PT_R2(sp)           # syscall number
        negu    v0                      # error
-       sw      v0, PT_R0(sp)           # set flag for syscall
-                                       # restarting
+       sw      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sw      v0, PT_R2(sp)           # result
 
        j       syscall_exit
@@ -169,8 +169,7 @@ stackargs:
         * We probably should handle this case a bit more drastic.
         */
 bad_stack:
-       negu    v0                              # error
-       sw      v0, PT_R0(sp)
+       li      v0, EFAULT
        sw      v0, PT_R2(sp)
        li      t0, 1                           # set error flag
        sw      t0, PT_R7(sp)
@@ -583,7 +582,10 @@ einval:    li      v0, -ENOSYS
        sys     sys_rt_tgsigqueueinfo   4
        sys     sys_perf_event_open     5
        sys     sys_accept4             4
-       sys     sys_recvmmsg            5
+       sys     sys_recvmmsg            5       /* 4335 */
+       sys     sys_fanotify_init       2
+       sys     sys_fanotify_mark       6
+       sys     sys_prlimit64           4
        .endm
 
        /* We pre-compute the number of _instruction_ bytes needed to
index a8a6c596eb0405bab886e8dfff6ffeb8097d7fc6..3f4179283207b1cc21e7fc14d9fea4da0c38bb28 100644 (file)
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # set flag for syscall
-                                       # restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
 n64_syscall_exit:
@@ -109,8 +109,9 @@ syscall_trace_entry:
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # set flag for syscall restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
        j       syscall_exit
@@ -416,9 +417,12 @@ sys_call_table:
        PTR     sys_pipe2
        PTR     sys_inotify_init1
        PTR     sys_preadv
-       PTR     sys_pwritev                     /* 5390 */
+       PTR     sys_pwritev                     /* 5290 */
        PTR     sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     sys_recvmmsg
+       PTR     sys_recvmmsg
+       PTR     sys_fanotify_init               /* 5295 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index a3d66137731ac24386972c82426a4679d9ff9be3..f08ece6d8acc7f3aa78ecbca23f801ae76727f6a 100644 (file)
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # set flag for syscall restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
        local_irq_disable               # make sure need_resched and
@@ -106,8 +107,9 @@ n32_syscall_trace_entry:
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # set flag for syscall restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
        j       syscall_exit
@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table)
        PTR     sys_cacheflush
        PTR     sys_cachectl
        PTR     sys_sysmips
-       PTR     sys_io_setup                    /* 6200 */
+       PTR     compat_sys_io_setup                     /* 6200 */
        PTR     sys_io_destroy
-       PTR     sys_io_getevents
-       PTR     sys_io_submit
+       PTR     compat_sys_io_getevents
+       PTR     compat_sys_io_submit
        PTR     sys_io_cancel
        PTR     sys_exit_group                  /* 6205 */
        PTR     sys_lookup_dcookie
@@ -419,5 +421,8 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
-       PTR     sys_getdents
+       PTR     sys_getdents64
+       PTR     sys_fanotify_init               /* 6300 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sysn32_call_table,.-sysn32_call_table
index 813689ef23847c6a2db230ebd6dcae60e0ff79f6..78d768a3e19da78fc986e9170f73b240ee98c1c2 100644 (file)
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp)
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # flag for syscall restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
 o32_syscall_exit:
@@ -142,8 +143,9 @@ trace_a_syscall:
        sd      t0, PT_R7(sp)           # set error flag
        beqz    t0, 1f
 
+       ld      t1, PT_R2(sp)           # syscall number
        dnegu   v0                      # error
-       sd      v0, PT_R0(sp)           # set flag for syscall restarting
+       sd      t1, PT_R0(sp)           # save it for syscall restarting
 1:     sd      v0, PT_R2(sp)           # result
 
        j       syscall_exit
@@ -154,8 +156,7 @@ trace_a_syscall:
         * The stackpointer for a call with more than 4 arguments is bad.
         */
 bad_stack:
-       dnegu   v0                      # error
-       sd      v0, PT_R0(sp)
+       li      v0, EFAULT
        sd      v0, PT_R2(sp)
        li      t0, 1                   # set error flag
        sd      t0, PT_R7(sp)
@@ -444,10 +445,10 @@ sys_call_table:
        PTR     compat_sys_futex
        PTR     compat_sys_sched_setaffinity
        PTR     compat_sys_sched_getaffinity    /* 4240 */
-       PTR     sys_io_setup
+       PTR     compat_sys_io_setup
        PTR     sys_io_destroy
-       PTR     sys_io_getevents
-       PTR     sys_io_submit
+       PTR     compat_sys_io_getevents
+       PTR     compat_sys_io_submit
        PTR     sys_io_cancel                   /* 4245 */
        PTR     sys_exit_group
        PTR     sys32_lookup_dcookie
@@ -538,5 +539,8 @@ sys_call_table:
        PTR     compat_sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     compat_sys_recvmmsg
+       PTR     compat_sys_recvmmsg             /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_32_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
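
Routing the 32-bit ABIs' AIO entries through compat_sys_io_* matters
because aio_context_t and the iocb pointer arrays change width between a
32-bit task and a 64-bit kernel. A simplified sketch of what such a
wrapper does, modelled on the generic fs/compat.c wrappers of this era
(treat names and details as illustrative):

    #include <linux/compat.h>
    #include <linux/syscalls.h>
    #include <linux/uaccess.h>

    /* Sketch: widen a 32-bit aio context pointer for the native syscall. */
    asmlinkage long compat_io_setup_sketch(unsigned nr_reqs, u32 __user *ctx32p)
    {
            aio_context_t ctx64;
            mm_segment_t oldfs = get_fs();
            long ret;

            if (get_user(ctx64, ctx32p))
                    return -EFAULT;
            set_fs(KERNEL_DS);      /* let sys_io_setup take a kernel pointer */
            ret = sys_io_setup(nr_reqs, (aio_context_t __user *)&ctx64);
            set_fs(oldfs);
            if (!ret)
                    ret = put_user((u32)ctx64, ctx32p);
            return ret;
    }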
index 2099d5a4c4b78224f85ee5f9b175907be3d8f15c..5922342bca3991d4b7ab4a9d6f8483fb3e416779 100644 (file)
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
        struct rt_sigframe __user *frame;
        sigset_t set;
-       stack_t st;
        int sig;
 
        frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
        else if (sig)
                force_sig(sig, current);
 
-       if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
-               goto badframe;
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
-       do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+       do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
 
        /*
         * Don't let your children do this ...
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        struct mips_abi *abi = current->thread.abi;
        void *vdso = current->mm->context.vdso;
 
-       switch(regs->regs[0]) {
-       case ERESTART_RESTARTBLOCK:
-       case ERESTARTNOHAND:
-               regs->regs[2] = EINTR;
-               break;
-       case ERESTARTSYS:
-               if (!(ka->sa.sa_flags & SA_RESTART)) {
+       if (regs->regs[0]) {
+               switch(regs->regs[2]) {
+               case ERESTART_RESTARTBLOCK:
+               case ERESTARTNOHAND:
                        regs->regs[2] = EINTR;
                        break;
+               case ERESTARTSYS:
+                       if (!(ka->sa.sa_flags & SA_RESTART)) {
+                               regs->regs[2] = EINTR;
+                               break;
+                       }
+               /* fallthrough */
+               case ERESTARTNOINTR:
+                       regs->regs[7] = regs->regs[26];
+                       regs->regs[2] = regs->regs[0];
+                       regs->cp0_epc -= 4;
                }
-       /* fallthrough */
-       case ERESTARTNOINTR:            /* Userland will reload $v0.  */
-               regs->regs[7] = regs->regs[26];
-               regs->cp0_epc -= 8;
-       }
 
-       regs->regs[0] = 0;              /* Don't deal with this again.  */
+               regs->regs[0] = 0;              /* Don't deal with this again.  */
+       }
 
        if (sig_uses_siginfo(ka))
                ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
                ret = abi->setup_frame(vdso + abi->signal_return_offset,
                                       ka, regs, sig, oldset);
 
+       if (ret)
+               return ret;
+
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs)
                return;
        }
 
-       /*
-        * Whose code doesn't conform to the restartable syscall convention
-        * dies here!!!  The li instruction, a single machine instruction,
-        * must directly be followed by the syscall instruction.
-        */
        if (regs->regs[0]) {
                if (regs->regs[2] == ERESTARTNOHAND ||
                    regs->regs[2] == ERESTARTSYS ||
                    regs->regs[2] == ERESTARTNOINTR) {
+                       regs->regs[2] = regs->regs[0];
                        regs->regs[7] = regs->regs[26];
-                       regs->cp0_epc -= 8;
+                       regs->cp0_epc -= 4;
                }
                if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
                        regs->regs[2] = current->thread.abi->restart;
index 2c5df818c65ae0395768264f1192059b792aaf02..ee24d814d5b91bb474ff3ff114e49f86618cdeae 100644 (file)
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
        struct rt_sigframe_n32 __user *frame;
+       mm_segment_t old_fs;
        sigset_t set;
        stack_t st;
        s32 sp;
@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
        do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+       set_fs(old_fs);
+
 
        /*
         * Don't let your children do this ...
index 69b039ca8d8337e60ecead9e32fbe7bd64659a64..33d5a5ce4a29d56037a38abb346a99e0212c16c2 100644 (file)
@@ -109,8 +109,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        unsigned long value;
        unsigned int res;
 
-       regs->regs[0] = 0;
-
        /*
         * This load never faults.
         */
index 7ba890860d98cb3916c84f369e3fef0200b07f2b..469d4019f795bd072b0aa4ba109d075b5377f55c 100644 (file)
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
+       gfp_t dma_flag;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
        if (dev == NULL)
-               gfp |= __GFP_DMA;
-       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
-               gfp |= __GFP_DMA;
+               dma_flag = __GFP_DMA;
        else
 #endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-               gfp |= __GFP_DMA32;
+                       dma_flag = __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+                       dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA;
        else
 #endif
-               ;
+               dma_flag = 0;
 
        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;
 
-       return gfp;
+       return gfp | dma_flag;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
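
The reworked massage_gfp_flags() reduces to picking a single zone modifier
from the device's coherent DMA mask, computed once into dma_flag rather
than OR-ed into gfp along the way. With both DMA zones configured, the
decision it encodes is roughly:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Sketch: choose a GFP zone modifier from the coherent DMA mask
     * (mirrors the CONFIG_ZONE_DMA && CONFIG_ZONE_DMA32 branch above). */
    static gfp_t pick_dma_zone(const struct device *dev)
    {
            if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                    return __GFP_DMA;       /* can't reach the full 4GB */
            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                    return __GFP_DMA32;     /* 32-bit-capable device */
            return 0;                       /* no zone restriction needed */
    }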
index 1ef75cd80a0d819827f5057549fdd2622442a752..274af3be1442b42fa41d3cb960b598ddcbf5b8c2 100644 (file)
@@ -30,7 +30,7 @@
 #define tc_lsize       32
 
 extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 
index 15949b0be811f9718af9e2896d4bd9e947c84897..b79b24afe3a2fc67ab6687a082e44d45bf219242 100644 (file)
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
  */
 
 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        { X, X,            X,           X,              0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        /* The remainder of this table is initialised by fill_ipi_map */
 };
+#undef X
 
 /*
  * GCMP needs to be detected before any SMP initialisation
index 71f7d27b0d4cccf28dba3777b0a8848de53f82c4..f31218e17d3c1437f4f8ef15a7d1855ddc6a32fb 100644 (file)
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
        if (!((pcicvalue == PCIM_H_EA) ||
              (pcicvalue == PCIM_H_IA_FIX) ||
              (pcicvalue == PCIM_H_IA_RR))) {
-               pr_err(KERN_ERR "PCI init error!!!\n");
+               pr_err("PCI init error!!!\n");
                /* Not in Host Mode, return ERROR */
                return -1;
        }
index fadd8744a6bccfbf25283369608c66b16afeab04..e7a12ff304b9475c0db2e6097989c510b288a3c4 100644 (file)
  */
 #include <linux/kernel.h>
 
+#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <glb.h>
 
 void pnx8550_machine_restart(char *command)
 {
-       char head[] = "************* Machine restart *************";
-       char foot[] = "*******************************************";
-
-       printk("\n\n");
-       printk("%s\n", head);
-       if (command != NULL)
-               printk("* %s\n", command);
-       printk("%s\n", foot);
-
        PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
 }
 
 void pnx8550_machine_halt(void)
 {
-       printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
-       printk("*** Machine power off.  (Not implemented) ***\n");
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
 }
index 64246c9c875c51d09e5c3861ca0e6f1096d50ac5..43cb3945fdbfffb8b355789e237a9b27df21abef 100644 (file)
@@ -44,7 +44,6 @@
 extern void __init board_setup(void);
 extern void pnx8550_machine_restart(char *);
 extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
 extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
 
         _machine_restart = pnx8550_machine_restart;
         _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
+        pm_power_off = pnx8550_machine_halt;
 
        /* Clear the Global 2 Register, PCI Inta Output Enable Registers
           Bit 1:Enable DAC Powerdown
index 444b9f918fdf8f2d5dec64b9827d8d746bb1ade1..7c2a2f7f8dc143889b74605d2741f5707ad330e4 100644 (file)
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
        def_bool y
        select HAVE_OPROFILE
-       select HAVE_ARCH_TRACEHOOK
 
 config AM33
        def_bool y
index ff80e86b9bd2d2305d34a731685d94fe5d3f3491..ce83c74b3fd714abf68fca4a4d2beef024545c83 100644 (file)
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
 
 choice
        prompt "GDB stub port"
-       default GDBSTUB_TTYSM0
+       default GDBSTUB_ON_TTYSM0
        depends on GDBSTUB
        help
          Select the serial port used for GDB-stub.
index f49ac49e09adc079adaabdd9893258ae9b795b7f..3f50e966107641f21f346a38e50cca97d2eda24b 100644 (file)
@@ -229,9 +229,9 @@ int ffs(int x)
 #include <asm-generic/bitops/hweight.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr) \
-       test_and_set_bit((nr) ^ 0x18, (addr))
+       test_and_set_bit((nr), (addr))
 #define ext2_clear_bit_atomic(lock, nr, addr) \
-       test_and_clear_bit((nr) ^ 0x18, (addr))
+       test_and_clear_bit((nr), (addr))
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/minix-le.h>
index 7e891fce2370028acea4a56497aafa74c443c0be..1865d72a86ff7cc6823a7be07dbbb0f3907e2518 100644 (file)
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN       32
-#define SIGRTMAX       (_NSIG-1)
+#define SIGRTMAX       _NSIG
 
 /*
  * SA_FLAGS values:
index 9d49073e827a26429335b54cc51cff3304433ac7..db509dd80565b9e91c2b661c08c5fd64bc52e8d4 100644 (file)
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
        ._intr          = &SC0ICR,
        ._rxb           = &SC0RXB,
        ._txb           = &SC0TXB,
-       .rx_name        = "ttySM0/Rx",
-       .tx_name        = "ttySM0/Tx",
+       .rx_name        = "ttySM0:Rx",
+       .tx_name        = "ttySM0:Tx",
 #ifdef CONFIG_MN10300_TTYSM0_TIMER8
-       .tm_name        = "ttySM0/Timer8",
+       .tm_name        = "ttySM0:Timer8",
        ._tmxmd         = &TM8MD,
        ._tmxbr         = &TM8BR,
        ._tmicr         = &TM8ICR,
        .tm_irq         = TM8IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM0_TIMER2 */
-       .tm_name        = "ttySM0/Timer2",
+       .tm_name        = "ttySM0:Timer2",
        ._tmxmd         = &TM2MD,
        ._tmxbr         = (volatile u16 *) &TM2BR,
        ._tmicr         = &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
        ._intr          = &SC1ICR,
        ._rxb           = &SC1RXB,
        ._txb           = &SC1TXB,
-       .rx_name        = "ttySM1/Rx",
-       .tx_name        = "ttySM1/Tx",
+       .rx_name        = "ttySM1:Rx",
+       .tx_name        = "ttySM1:Tx",
 #ifdef CONFIG_MN10300_TTYSM1_TIMER9
-       .tm_name        = "ttySM1/Timer9",
+       .tm_name        = "ttySM1:Timer9",
        ._tmxmd         = &TM9MD,
        ._tmxbr         = &TM9BR,
        ._tmicr         = &TM9ICR,
        .tm_irq         = TM9IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM1_TIMER3 */
-       .tm_name        = "ttySM1/Timer3",
+       .tm_name        = "ttySM1:Timer3",
        ._tmxmd         = &TM3MD,
        ._tmxbr         = (volatile u16 *) &TM3BR,
        ._tmicr         = &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
        .uart.lock      =
        __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
        .name           = "ttySM2",
-       .rx_name        = "ttySM2/Rx",
-       .tx_name        = "ttySM2/Tx",
-       .tm_name        = "ttySM2/Timer10",
+       .rx_name        = "ttySM2:Rx",
+       .tx_name        = "ttySM2:Tx",
+       .tm_name        = "ttySM2:Timer10",
        ._iobase        = &SC2CTR,
        ._control       = &SC2CTR,
        ._status        = &SC2STR,
index 6aea7fd76993b931f31f2dda76e72aecc1e31b4d..196a111e2e2937b134217356991c0ca2f68bda05 100644 (file)
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 /*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
  */
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 717db14c2cc32d8905a45e6cfae4bf2a00d38abd..d4de05ab786464cd585e7f1f0e6ed1652ad3de1c 100644 (file)
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
                old_sigset_t mask;
                if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
        if (!ret && oact) {
                if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 {
        unsigned int err = 0;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        if (is_using_fpu(current))
                fpu_kill_state(current);
 
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        regs->d0 = sig;
        regs->d1 = (unsigned long) &frame->sc;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->d0 = sig;
        regs->d1 = (long) &frame->info;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
+static inline void stepback(struct pt_regs *regs)
+{
+       regs->pc -= 2;
+       regs->orig_d0 = -1;
+}
+
 /*
  * handle the actual delivery of a signal to userspace
  */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
                        /* fallthrough */
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                }
        }
 
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
 
                case -ERESTART_RESTARTBLOCK:
                        regs->d0 = __NR_restart_syscall;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
                }
        }
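
The new stepback() helper pairs the two-byte PC rewind with poisoning
orig_d0, so a second pass through the signal code cannot restart the same
syscall twice. A sketch of the self-disarming test this enables (the
guard shown is hypothetical shorthand, not the exact kernel condition):

    /* Sketch: orig_d0 == -1 disarms any further restart attempt. */
    if (regs->orig_d0 != -1 && regs->d0 == -ERESTARTNOINTR) {
            regs->d0 = regs->orig_d0;   /* re-feed the original syscall number */
            stepback(regs);             /* pc -= 2; orig_d0 = -1 */
    }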
index 28b9d983db0cb280c07dcd76921d695892e11c48..1557277fbc5c03962c56f39b7d1a5687bdea80bd 100644 (file)
@@ -2,13 +2,11 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cacheflush-y   := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
-       misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y  += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y  += cache-flush-mn10300.o
-endif
-endif
+       misalignment.o dma-alloc.o $(cacheflush-y)
similarity index 53%
rename from arch/frv/lib/perf_event.c
rename to arch/mn10300/mm/cache-disabled.c
index 9ac5acfd2e9101016140be7360713366b2ffc16f..f669ea42aba638472a9552103dfd66e978d12009 100644 (file)
@@ -1,6 +1,6 @@
-/* Performance event handling
+/* Handle the cache being disabled
  *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -8,12 +8,14 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-
-#include <linux/perf_event.h>
+#include <linux/mm.h>
 
 /*
- * mark the performance event as pending
+ * allow userspace to flush the instruction cache
  */
-void set_perf_event_pending(void)
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
 {
+       if (end < start)
+               return -EINVAL;
+       return 0;
 }
index 1b76719ec1c37b1686a648cd07f8c5e7baaf9ce1..9261217e8d2c5741bb500b829bbd7663859b5541 100644 (file)
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, off;
+       unsigned long addr, size, base, off;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ppte, pte;
 
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               return;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses directly */
+               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_dcache_flush_range(base, end);
+               if (base == start)
+                       goto invalidate;
+               end = base;
+       }
+
        for (; start < end; start += size) {
                /* work out how much of the page to flush */
                off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 #endif
 
+invalidate:
        mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
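
The early exits added to flush_icache_range() rely on the fixed MN10300
kernel address map: 0x80000000-0x9fffffff is cached and direct-mapped (no
page-table walk needed), and everything from 0xa0000000 upward bypasses
the cache entirely. The window test in isolation, restating the hunk's
assumptions:

    /* Sketch: MN10300 kernel address windows assumed by the hunk above. */
    static inline int is_cached_direct_mapped(unsigned long addr)
    {
            return addr >= 0x80000000UL && addr < 0xa0000000UL;
    }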
index 907417d187e15039bb4bb6d96075cd950a045dbd..79a04a9394d5ad33b768d79ce1c2230b24e1c9bb 100644 (file)
@@ -16,6 +16,7 @@ config PARISC
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select GENERIC_ATOMIC64 if !64BIT
        help
index 02b77baa5da69f04729013cd9b6402ffc2dc3c39..efa0b60c63fe683f22629ddd540ed036495cd896 100644 (file)
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = &current->thread.regs;
        return (void __user *)regs->gr[30];
index cc146427d8f9acf2219aa14813d76e52835bb83f..1e0fd8ba6c033e5f277afc02075fc7de57b1adc6 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef __ASM_PARISC_PERF_EVENT_H
 #define __ASM_PARISC_PERF_EVENT_H
 
-/* parisc only supports software events through this interface. */
-static inline void set_perf_event_pending(void) { }
+/* Empty, just to avoid compiling error */
 
 #endif /* __ASM_PARISC_PERF_EVENT_H */
index 159a2b81e90c630db82eb9834c2096df7eb66896..6e81bb596e5b476e598e4a7309e4aba80ba0a322 100644 (file)
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
        nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
        DEBUGP("NEW num_symtab %lu\n", nsyms);
        symhdr->sh_size = nsyms * sizeof(Elf_Sym);
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        deregister_unwind_table(mod);
-       module_bug_cleanup(mod);
 }
index 631e5a0fb6abcf2e124f0dfc6d1ed95d9b9f4566..4b1e521d966f0facaf7d1b70be55ebaf83df3954 100644 (file)
@@ -138,6 +138,7 @@ config PPC
        select HAVE_OPROFILE
        select HAVE_SYSCALL_WRAPPERS if PPC64
        select GENERIC_ATOMIC64 if PPC32
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
index 396d21a800587f0c8a740714a3277a743d2618cc..a11d4eac4f97f369f48866a7475696879b8be5d3 100644 (file)
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current->thread.regs;
        unsigned long usp = regs->gpr[1];
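
Renaming compat_alloc_user_space() to arch_compat_alloc_user_space() on
parisc and powerpc frees the unprefixed name for a generic wrapper that
can validate the scratch area before callers copy through it. A plausible
shape for that wrapper (an assumption about the generic header, not shown
in this diff):

    #include <linux/uaccess.h>

    /* Sketch: generic wrapper the arch_ rename makes room for. */
    static inline void __user *compat_alloc_user_space_sketch(unsigned long len)
    {
            void __user *ptr = arch_compat_alloc_user_space(len);

            if (!access_ok(VERIFY_WRITE, ptr, len))
                    return NULL;    /* refuse an unusable scratch area */
            return ptr;
    }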
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index 1ff6662f7faf28ffce6577ac40f0a237f2b866e8..9b287fdd8ea335352cb7034a600225b6161d6a30 100644 (file)
@@ -129,7 +129,7 @@ struct paca_struct {
        u8 soft_enabled;                /* irq soft-enable flag */
        u8 hard_enabled;                /* set if irqs are enabled in MSR */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
-       u8 perf_event_pending;          /* PM interrupt while soft-disabled */
+       u8 irq_work_pending;            /* IRQ_WORK interrupt while soft-disabled */
 
        /* Stuff for accurate time accounting */
        u64 user_time;                  /* accumulated usermode TB ticks */
index 6c294acac848145d868d9f936c5df5ca879b1920..9c3d160670b4ed928da2b92bdbfd60dad8a214ae 100644 (file)
@@ -542,10 +542,6 @@ extern void reloc_got2(unsigned long);
 
 #define PTRRELOC(x)    ((typeof(x)) add_reloc_offset((unsigned long)(x)))
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
 extern struct dentry *powerpc_debugfs_root;
 
 #endif /* __KERNEL__ */
index 477c663e014043a5c08fbaf51e82853005391344..49cee9df225be8bfc6b06a429ee9243d10484439 100644 (file)
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                const Elf_Shdr *sechdrs, struct module *me)
 {
        const Elf_Shdr *sect;
-       int err;
-
-       err = module_bug_finalize(hdr, sechdrs, me);
-       if (err)
-               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 95ad9dad298e9d4773117b0406bc4a3378d77e5e..d05ae4204bbf3d3ddcc84266476b736a6790715c 100644 (file)
 #include "ppc32.h"
 #endif
 
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       unsigned int nr = entry->nr;
-
-       if (nr < PERF_MAX_STACK_DEPTH) {
-               entry->ip[nr] = ip;
-               entry->nr = nr + 1;
-       }
-}
 
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
        return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->nip);
+       perf_callchain_store(entry, regs->nip);
 
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_KERNEL);
+                       perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 
                } else {
                        if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        ++level;
                }
 
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
                puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        for (;;) {
                fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
        return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-                                         struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                         struct pt_regs *regs)
 {
 }
 
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
        return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned int sp, next_sp;
        unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-       entry->nr = 0;
-
-       if (!user_mode(regs)) {
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               if (current_is_64bit())
-                       perf_callchain_user_64(regs, entry);
-               else
-                       perf_callchain_user_32(regs, entry);
-       }
-
-       return entry;
+       if (current_is_64bit())
+               perf_callchain_user_64(entry, regs);
+       else
+               perf_callchain_user_32(entry, regs);
 }
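
Dropping the local callchain_store() in favour of the core
perf_callchain_store() also removes the per-CPU entry and the
kernel-vs-user dispatch, which the generic perf code now owns. The helper
being substituted behaves roughly like the old one did:

    #include <linux/perf_event.h>

    /* Sketch: the effect of perf_callchain_store(entry, ip). */
    static inline void callchain_store_sketch(struct perf_callchain_entry *entry,
                                              u64 ip)
    {
            if (entry->nr < PERF_MAX_STACK_DEPTH)
                    entry->ip[entry->nr++] = ip;
    }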
index d301a30445e09a49cec4a3d4dcf2ea01529934b3..3129c855933c2a3857b0c4b3321b259b851279b8 100644 (file)
@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        if (!event->hw.idx)
                return;
        /*
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -565,7 +568,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
 {
        struct perf_event *event;
        struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@ void hw_perf_enable(void)
                }
                local64_set(&event->hw.prev_count, val);
                event->hw.idx = idx;
+               if (event->hw.state & PERF_HES_STOPPED)
+                       val = 0;
                write_pmc(idx, val);
                perf_event_update_userpage(event);
        }
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count,
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event)
        int ret = -EAGAIN;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        /*
         * Add the event to the list (if there is room)
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event)
        cpuhw->events[n0] = event->hw.config;
        cpuhw->flags[n0] = event->hw.event_base;
 
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
@@ -769,7 +777,7 @@ nocheck:
 
        ret = 0;
  out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
@@ -777,14 +785,14 @@ nocheck:
 /*
  * Remove a event from the PMU.
  */
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuhw;
        long i;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        power_pmu_read(event);
 
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event)
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
 /*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters; instead,
+ * program their cycle counter to their max value and ignore the interrupts.
  */
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+       s64 left;
+
+       if (!event->hw.idx || !event->hw.sample_period)
+               return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
 
        if (!event->hw.idx || !event->hw.sample_period)
                return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
+
        power_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        long i, n;
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-struct pmu power_pmu = {
-       .enable         = power_pmu_enable,
-       .disable        = power_pmu_disable,
-       .read           = power_pmu_read,
-       .unthrottle     = power_pmu_unthrottle,
-       .start_txn      = power_pmu_start_txn,
-       .cancel_txn     = power_pmu_cancel_txn,
-       .commit_txn     = power_pmu_commit_txn,
-};
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        unsigned long flags;
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        struct cpu_hw_events *cpuhw;
 
        if (!ppmu)
-               return ERR_PTR(-ENXIO);
+               return -ENOENT;
+
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
+
        event->hw.config_base = ev;
        event->hw.idx = 0;
 
@@ -1063,7 +1092,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
         * XXX we should check if the task is an idle task.
         */
        flags = 0;
-       if (event->ctx->task)
+       if (event->attach_state & PERF_ATTACH_TASK)
                flags |= PPMU_ONLY_COUNT_RUN;
 
        /*
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                         */
                        ev = normal_pmc_alternative(ev, flags);
                        if (!ev)
-                               return ERR_PTR(-EINVAL);
+                               return -EINVAL;
                }
        }
 
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader, ppmu->n_counter - 1,
                                   ctrs, events, cflags);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
        events[n] = ev;
        ctrs[n] = event;
        cflags[n] = flags;
        if (check_excludes(ctrs, cflags, n, 1))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        cpuhw = &get_cpu_var(cpu_hw_events);
        err = power_check_constraints(cpuhw, events, cflags, n + 1);
        put_cpu_var(cpu_hw_events);
        if (err)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        event->hw.config = events[n];
        event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &power_pmu;
+       return err;
 }
 
+struct pmu power_pmu = {
+       .pmu_enable     = power_pmu_enable,
+       .pmu_disable    = power_pmu_disable,
+       .event_init     = power_pmu_event_init,
+       .add            = power_pmu_add,
+       .del            = power_pmu_del,
+       .start          = power_pmu_start,
+       .stop           = power_pmu_stop,
+       .read           = power_pmu_read,
+       .start_txn      = power_pmu_start_txn,
+       .cancel_txn     = power_pmu_cancel_txn,
+       .commit_txn     = power_pmu_commit_txn,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                        perf_get_data_addr(regs, &data.addr);
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       power_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 /*
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
                freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+       perf_pmu_register(&power_pmu);
        perf_cpu_notifier(power_pmu_notifier);
 
        return 0;
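
The conversion gives power_pmu the new struct pmu calling convention:
add()/del() claim and release a hardware counter for an event,
start()/stop() gate counting on an already-scheduled event (PERF_EF_*
flags), and pmu_enable()/pmu_disable() bracket batched reprogramming.
Roughly how the core drives these callbacks (a simplified sketch, not the
real scheduler loop):

    /* Sketch: scheduling one event under the new struct pmu contract. */
    static int schedule_event_sketch(struct pmu *pmu, struct perf_event *event)
    {
            int err;

            pmu->pmu_disable(pmu);                /* batch PMU reprogramming */
            err = pmu->add(event, PERF_EF_START); /* claim a counter, start it */
            pmu->pmu_enable(pmu);                 /* commit the configuration */
            return err;
    }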
index 1ba45471ae436617e1ecbf3654a5064ef15d1af7..7ecca59ddf77fe20bd46b470d9392cdd16fd5ba9 100644 (file)
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 {
        s64 val, delta, prev;
 
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -216,7 +219,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        u64 val;
        int i;
 
+       perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);
+
+       if (!(flags & PERF_EF_START)) {
+               event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+               val = 0;
+       }
+
        write_pmc(i, val);
        perf_event_update_userpage(event);
 
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        ret = 0;
  out:
        put_cpu_var(cpu_hw_events);
+       perf_pmu_enable(event->pmu);
        return ret;
 }
 
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;
 
+       perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;
 
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        cpuhw->n_events--;
 
  out:
+       perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
 }
 
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
 {
-       s64 val, left;
        unsigned long flags;
+       s64 left;
 
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
+
+       if (!(event->hw.state & PERF_HES_STOPPED))
+               return;
+
+       if (ef_flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
        local_irq_save(flags);
-       perf_disable();
-       fsl_emb_pmu_read(event);
-       left = event->hw.sample_period;
-       event->hw.last_period = left;
-       val = 0;
-       if (left < 0x80000000L)
-               val = 0x80000000L - left;
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
+       perf_pmu_disable(event->pmu);
+
+       event->hw.state = 0;
+       left = local64_read(&event->hw.period_left);
+       write_pmc(event->hw.idx, left);
+
        perf_event_update_userpage(event);
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static struct pmu fsl_emb_pmu = {
-       .enable         = fsl_emb_pmu_enable,
-       .disable        = fsl_emb_pmu_disable,
-       .read           = fsl_emb_pmu_read,
-       .unthrottle     = fsl_emb_pmu_unthrottle,
-};
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
+       unsigned long flags;
+
+       if (event->hw.idx < 0 || !event->hw.sample_period)
+               return;
+
+       if (event->hw.state & PERF_HES_STOPPED)
+               return;
+
+       local_irq_save(flags);
+       perf_pmu_disable(event->pmu);
+
+       fsl_emb_pmu_read(event);
+       event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       write_pmc(event->hw.idx, 0);
+
+       perf_event_update_userpage(event);
+       perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+}
 
 /*
  * Release the PMU if this is the last perf_event.
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
        return 0;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
 {
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
-                       return ERR_PTR(-EOPNOTSUPP);
+                       return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;
 
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
-                       return ERR_PTR(err);
+                       return err;
                break;
 
        case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                break;
 
        default:
-               return ERR_PTR(-EINVAL);
+               return -ENOENT;
        }
 
        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /*
         * If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
                }
 
                if (num_restricted >= ppmu->n_restricted)
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
        }
 
        event->hw.idx = -1;
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
-               return ERR_PTR(-ENOTSUPP);
+               return -ENOTSUPP;
 
        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        }
        event->destroy = hw_perf_event_destroy;
 
-       if (err)
-               return ERR_PTR(err);
-       return &fsl_emb_pmu;
+       return err;
 }
 
+static struct pmu fsl_emb_pmu = {
+       .pmu_enable     = fsl_emb_pmu_enable,
+       .pmu_disable    = fsl_emb_pmu_disable,
+       .event_init     = fsl_emb_pmu_event_init,
+       .add            = fsl_emb_pmu_add,
+       .del            = fsl_emb_pmu_del,
+       .start          = fsl_emb_pmu_start,
+       .stop           = fsl_emb_pmu_stop,
+       .read           = fsl_emb_pmu_read,
+};
+
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
        s64 prev, delta, left;
        int record = 0;
 
+       if (event->hw.state & PERF_HES_STOPPED) {
+               write_pmc(event->hw.idx, 0);
+               return;
+       }
+
        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        val = 0x80000000LL - left;
        }
 
+       write_pmc(event->hw.idx, val);
+       local64_set(&event->hw.prev_count, val);
+       local64_set(&event->hw.period_left, left);
+       perf_event_update_userpage(event);
+
        /*
         * Finally record data if requested.
         */
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                perf_sample_data_init(&data, 0);
                data.period = event->hw.last_period;
 
-               if (perf_event_overflow(event, nmi, &data, regs)) {
-                       /*
-                        * Interrupts are coming too fast - throttle them
-                        * by setting the event to 0, so it will be
-                        * at least 2^30 cycles until the next interrupt
-                        * (assuming each event counts at most 2 counts
-                        * per cycle).
-                        */
-                       val = 0;
-                       left = ~0ULL >> 1;
-               }
+               if (perf_event_overflow(event, nmi, &data, regs))
+                       fsl_emb_pmu_stop(event, 0);
        }
-
-       write_pmc(event->hw.idx, val);
-       local64_set(&event->hw.prev_count, val);
-       local64_set(&event->hw.period_left, left);
-       perf_event_update_userpage(event);
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);
 
+       perf_pmu_register(&fsl_emb_pmu);
+
        return 0;
 }
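
The switch from returning &fsl_emb_pmu (or an ERR_PTR) to plain errno codes matters because the reworked perf core walks a list of registered PMUs: an event_init that returns -ENOENT merely declines the event so the next PMU can claim it, while any other error fails the event outright. A minimal userspace model of that dispatch, with illustrative names throughout:

    /* Model of the new event_init contract: -ENOENT means "not my
     * event type, ask the next PMU"; any other error is final.
     */
    #include <errno.h>
    #include <stdio.h>

    struct ev { int type; };

    static int hw_like_init(struct ev *e)
    {
            if (e->type != 1)          /* not a hardware-type event */
                    return -ENOENT;    /* let another PMU claim it */
            return 0;
    }

    static int sw_like_init(struct ev *e)
    {
            return e->type == 2 ? 0 : -ENOENT;
    }

    static int (*pmus[])(struct ev *) = { hw_like_init, sw_like_init };

    static int init_event(struct ev *e)
    {
            int err = -ENOENT;
            unsigned i;

            for (i = 0; i < 2; i++) {
                    err = pmus[i](e);
                    if (err != -ENOENT)   /* claimed, or hard failure */
                            break;
            }
            return err;
    }

    int main(void)
    {
            struct ev hw = { 1 }, sw = { 2 }, bogus = { 9 };

            printf("%d %d %d\n", init_event(&hw), init_event(&sw),
                   init_event(&bogus));   /* 0 0 -ENOENT */
            return 0;
    }
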
index 7109f5b1baa87bd63e36aa18910c7d0bdfa40b12..2300426e531a096239b0620f9ec29f71e70b6daf 100644 (file)
@@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
                        ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
                        sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
                }
+               regs->trap = 0;
                return 0;               /* no signals delivered */
        }
 
@@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
                ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
        }
 
+       regs->trap = 0;
        if (ret) {
                spin_lock_irq(&current->sighand->siglock);
                sigorsets(&current->blocked, &current->blocked,
index 266610119f664970c66b72832ebb571eb842b1bc..b96a3a010c26859ab93f8cca5ec74cd4905dd16b 100644 (file)
@@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs,
        if (!sig)
                save_r2 = (unsigned int)regs->gpr[2];
        err = restore_general_regs(regs, sr);
+       regs->trap = 0;
        err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
        if (!sig)
                regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
-       regs->trap = 0;
        return 1;
 
 badframe:
@@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
-       regs->trap = 0;
 
        return 1;
 
index 2fe6fc64b614ef9d1b935a472487eaf161967fd9..27c4a4584f805b83fbcf09c5478e7ec9fa7f5e7c 100644 (file)
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
        err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
        err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
        /* skip SOFTE */
-       err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+       regs->trap = 0;
        err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
index 8533b3b83f5d0e35b50fb39eefb663e6d81975b0..54888eb10c3b4686f5b3d29a7166afc87bc76da0 100644 (file)
@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
 {
        unsigned long x;
 
        asm volatile("lbz %0,%1(13)"
                : "=r" (x)
-               : "i" (offsetof(struct paca_struct, perf_event_pending)));
+               : "i" (offsetof(struct paca_struct, irq_work_pending)));
        return x;
 }
 
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
 {
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
-               "i" (offsetof(struct paca_struct, perf_event_pending)));
+               "i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
 {
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
-               "i" (offsetof(struct paca_struct, perf_event_pending)));
+               "i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
 #else /* 32-bit */
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()  __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()      __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()     __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()    __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()                __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()       __get_cpu_var(irq_work_pending) = 0
 
 #endif /* 32 vs 64 bit */
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
        preempt_disable();
-       set_perf_event_pending_flag();
+       set_irq_work_pending_flag();
        set_dec(1);
        preempt_enable();
 }
 
-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()      0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()        0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
 
        calculate_steal_time();
 
-       if (test_perf_event_pending()) {
-               clear_perf_event_pending();
-               perf_event_do_pending();
+       if (test_irq_work_pending()) {
+               clear_irq_work_pending();
+               irq_work_run();
        }
 
 #ifdef CONFIG_PPC_ISERIES
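
The conversion above keeps the decrementer trick but generalizes the client: anything queued through the irq_work machinery now runs from timer_interrupt(), with perf wakeups as just one user. A compact userspace model of the pending-flag handshake, names illustrative:

    /* Raising work sets a per-cpu flag and forces a timer interrupt;
     * the timer handler tests, clears, then runs the queued work.
     */
    #include <stdio.h>

    static int irq_work_pending;          /* stands in for the PACA byte */

    static void work_fn(void) { puts("irq_work ran"); }

    static void arch_irq_work_raise(void)
    {
            irq_work_pending = 1;         /* set_dec(1) would follow here */
    }

    static void timer_interrupt(void)
    {
            if (irq_work_pending) {
                    irq_work_pending = 0; /* clear before running */
                    work_fn();            /* irq_work_run() equivalent */
            }
    }

    int main(void)
    {
            arch_irq_work_raise();
            timer_interrupt();            /* prints once */
            timer_interrupt();            /* flag clear: no output */
            return 0;
    }
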
index 5b243bd3eb3b699ee6a0712340c9df51db2ed948..3dc2a8d262b8731aa4995b6c4b20621f06742fef 100644 (file)
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
        int id_match = 0;
 
        if (dev == NULL || id == NULL)
-               return NULL;
+               return clk;
 
        mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
index 45c0cb9b67e6774958c621b6e3c8055d43e46163..18c10482019811fd4fa25cbf1b0972ef87d4d0c7 100644 (file)
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
        if (bus_range == NULL || len < 2 * sizeof(int)) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't get bus-range for %s\n", pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
        printk(" controlled by %s\n", pcictrl->full_name);
        printk("\n");
 
-       hose = pcibios_alloc_controller(of_node_get(pcictrl));
+       hose = pcibios_alloc_controller(pcictrl);
        if (!hose) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't allocate PCI controller structure for %s\n",
                       pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
        hose->ops = &rtas_pci_ops;
 
        pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+       return;
+out_put:
+       of_node_put(pcictrl);
 }
 
 #else
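
The reference held on pcictrl was previously leaked on both early-error returns; routing them through a single out_put label keeps the get/put pairing obvious, and the success path deliberately returns with the reference held because the hose keeps the node. A sketch of the pattern, with a toy refcount standing in for struct device_node:

    /* One exit label drops the reference on every error path; the
     * success path hands ownership off instead of dropping it.
     */
    #include <stdio.h>

    struct node { int refs; };

    static void node_put(struct node *n) { n->refs--; }

    static int setup(struct node *n, int bad_range, int bad_alloc)
    {
            n->refs++;                      /* caller-side of_node_get() */

            if (bad_range)
                    goto out_put;
            if (bad_alloc)
                    goto out_put;
            return 0;                       /* reference handed off */
    out_put:
            node_put(n);                    /* of_node_put() on failure */
            return -1;
    }

    int main(void)
    {
            struct node n = { 0 };

            setup(&n, 1, 0);
            printf("refs after failed setup: %d\n", n.refs);  /* 0 */
            return 0;
    }
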
index 6e905314ad5d66035daf38a514adbfaa2aa60dfa..41f3a7eda1def670c1c12864de488788fc27ff98 100644 (file)
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
        clrbits32(&simple_gpio->simple_dvo, sync | out);
        clrbits8(&wkup_gpio->wkup_dvo, reset);
 
-       /* wait at lease 1 us */
-       udelay(2);
+       /* wait for 1 us */
+       udelay(1);
 
        /* Deassert reset */
        setbits8(&wkup_gpio->wkup_dvo, reset);
 
+       /* wait at least 200ns */
+       /* 7 ~= (200ns * timebase) / ns2sec */
+       __delay(7);
+
        /* Restore pin-muxing */
        out_be32(&simple_gpio->port_config, mux);
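
A back-of-envelope check of the __delay(7) constant, assuming a timebase in the 33 MHz range (MPC5200-class parts clock the timebase at a quarter of the bus clock; both figures here are assumptions, not values read from this board):

    #include <stdio.h>

    int main(void)
    {
            double timebase_hz = 33e6;     /* assumed */
            double delay_s = 200e-9;       /* the 200 ns minimum above */

            printf("ticks = %.1f -> __delay(%d)\n",
                   delay_s * timebase_hz,               /* ~6.6 */
                   (int)(delay_s * timebase_hz + 1));   /* round up: 7 */
            return 0;
    }
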
 
index f0777a47e3a531c17156a76694648aaf89cacd44..75976a14194770390154ca124d9e94f95eedecf4 100644 (file)
@@ -95,6 +95,7 @@ config S390
        select HAVE_KVM if 64BIT
        select HAVE_ARCH_TRACEHOOK
        select INIT_ALL_POSSIBLE
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
@@ -198,6 +199,13 @@ config HOTPLUG_CPU
          can be controlled through /sys/devices/system/cpu/cpu#.
          Say N if you want to disable CPU hotplug.
 
+config SCHED_BOOK
+       bool "Book scheduler support"
+       depends on SMP
+       help
+         Book scheduler support improves the CPU scheduler's decision making
+         when dealing with machines that have several books.
+
 config MATHEMU
        bool "IEEE FPU emulation"
        depends on MARCH_G5
index 104f2007f097720b339e469d37b7732c120c8168..a875c2f542e1070a120b484c96a0d2fc10fc55cb 100644 (file)
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
 
 #endif
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        unsigned long stack;
 
index 498bc38923856ddb1957ddd0ac934b7c7e510003..881d94590aeb4c84efb8e5c58fae68b73b70f6e5 100644 (file)
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
 #include <asm/lowcore.h>
 
 #define local_softirq_pending() (S390_lowcore.softirq_pending)
index 3840cbe77637fe1ffef8c5baceb2c82c8ed7759d..a75f168d2718578544914c8a00eb0e1245ef9c8f 100644 (file)
@@ -4,7 +4,6 @@
  * Copyright 2009 Martin Schwidefsky, IBM Corporation.
  */
 
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */
 
 #define PERF_EVENT_INDEX_OFFSET 0
index cef66210c8466d68e13eaccce453582060c18a4f..38ddd8a9a9e877c8e4680924322b4aff792c7110 100644 (file)
@@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
 
 #ifdef CONFIG_PFAULT
 extern void pfault_irq_init(void);
index 831bd033ea77b3f2c72c408f40621a82d22ac017..051107a2c5e249397f71163544ae105dbde6d302 100644 (file)
@@ -3,15 +3,32 @@
 
 #include <linux/cpumask.h>
 
-#define mc_capable()   (1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+       return &cpu_core_map[cpu];
+}
+
 #define topology_core_id(cpu)          (cpu_core_id[cpu])
 #define topology_core_cpumask(cpu)     (&cpu_core_map[cpu])
+#define mc_capable()                   (1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+       return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu)          (cpu_book_id[cpu])
+#define topology_book_cpumask(cpu)     (&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
 
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
+#define SD_BOOK_INIT   SD_CPU_INIT
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
index 22cfd634c35531b7f8a1d4057f92b4af72e20d1a..f7167ee4604cf7033e30eb3845aa8f9ba9fd538b 100644 (file)
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        vfree(me->arch.syminfo);
        me->arch.syminfo = NULL;
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index bcef00766a646dec0a5158bdf10c0574d8177293..13559c9938470b9d7d0c2597f6418e518d865c78 100644 (file)
@@ -57,8 +57,8 @@ struct tl_info {
        union tl_entry tle[0];
 };
 
-struct core_info {
-       struct core_info *next;
+struct mask_info {
+       struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
 };
@@ -66,7 +66,6 @@ struct core_info {
 static int topology_enabled;
 static void topology_work_fn(struct work_struct *work);
 static struct tl_info *tl_info;
-static struct core_info core_info;
 static int machine_has_topology;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
 static DEFINE_SPINLOCK(topology_lock);
 
+static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
-       struct core_info *core = &core_info;
-       unsigned long flags;
        cpumask_t mask;
 
        cpus_clear(mask);
        if (!topology_enabled || !machine_has_topology)
                return cpu_possible_map;
-       spin_lock_irqsave(&topology_lock, flags);
-       while (core) {
-               if (cpu_isset(cpu, core->mask)) {
-                       mask = core->mask;
+       while (info) {
+               if (cpu_isset(cpu, info->mask)) {
+                       mask = info->mask;
                        break;
                }
-               core = core->next;
+               info = info->next;
        }
-       spin_unlock_irqrestore(&topology_lock, flags);
        if (cpus_empty(mask))
                mask = cpumask_of_cpu(cpu);
        return mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
-       return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+                            struct mask_info *core)
 {
        unsigned int cpu;
 
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
 
                rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
                for_each_present_cpu(lcpu) {
-                       if (cpu_logical_map(lcpu) == rcpu) {
-                               cpu_set(lcpu, core->mask);
-                               cpu_core_id[lcpu] = core->id;
-                               smp_cpu_polarization[lcpu] = tl_cpu->pp;
-                       }
+                       if (cpu_logical_map(lcpu) != rcpu)
+                               continue;
+#ifdef CONFIG_SCHED_BOOK
+                       cpu_set(lcpu, book->mask);
+                       cpu_book_id[lcpu] = book->id;
+#endif
+                       cpu_set(lcpu, core->mask);
+                       cpu_core_id[lcpu] = core->id;
+                       smp_cpu_polarization[lcpu] = tl_cpu->pp;
                }
        }
 }
 
-static void clear_cores(void)
+static void clear_masks(void)
 {
-       struct core_info *core = &core_info;
+       struct mask_info *info;
 
-       while (core) {
-               cpus_clear(core->mask);
-               core = core->next;
+       info = &core_info;
+       while (info) {
+               cpus_clear(info->mask);
+               info = info->next;
+       }
+#ifdef CONFIG_SCHED_BOOK
+       info = &book_info;
+       while (info) {
+               cpus_clear(info->mask);
+               info = info->next;
        }
+#endif
 }
 
 static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
 
 static void tl_to_cores(struct tl_info *info)
 {
+#ifdef CONFIG_SCHED_BOOK
+       struct mask_info *book = &book_info;
+#else
+       struct mask_info *book = NULL;
+#endif
+       struct mask_info *core = &core_info;
        union tl_entry *tle, *end;
-       struct core_info *core = &core_info;
+
 
        spin_lock_irq(&topology_lock);
-       clear_cores();
+       clear_masks();
        tle = info->tle;
        end = (union tl_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
-               case 5:
-               case 4:
-               case 3:
+#ifdef CONFIG_SCHED_BOOK
                case 2:
+                       book = book->next;
+                       book->id = tle->container.id;
                        break;
+#endif
                case 1:
                        core = core->next;
                        core->id = tle->container.id;
                        break;
                case 0:
-                       add_cpus_to_core(&tle->cpu, core);
+                       add_cpus_to_mask(&tle->cpu, book, core);
                        break;
                default:
-                       clear_cores();
+                       clear_masks();
                        machine_has_topology = 0;
                        goto out;
                }
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
 
 static void update_cpu_core_map(void)
 {
+       unsigned long flags;
        int cpu;
 
-       for_each_possible_cpu(cpu)
-               cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+       spin_lock_irqsave(&topology_lock, flags);
+       for_each_possible_cpu(cpu) {
+               cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+               cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+       }
+       spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+       int rc;
+
+       rc = stsi(info, 15, 1, 3);
+       if (rc != -ENOSYS)
+               return;
+#endif
+       stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
                topology_update_polarization_simple();
                return 0;
        }
-       stsi(info, 15, 1, 2);
+       store_topology(info);
        tl_to_cores(info);
        update_cpu_core_map();
        for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ out:
 }
 __initcall(init_topology_update);
 
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+       int i, nr_masks;
+
+       nr_masks = info->mag[NR_MAG - offset];
+       for (i = 0; i < info->mnest - offset; i++)
+               nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+       nr_masks = max(nr_masks, 1);
+       for (i = 0; i < nr_masks; i++) {
+               mask->next = alloc_bootmem(sizeof(struct mask_info));
+               mask = mask->next;
+       }
+}
+
 void __init s390_init_cpu_topology(void)
 {
        unsigned long long facility_bits;
        struct tl_info *info;
-       struct core_info *core;
-       int nr_cores;
        int i;
 
        if (stfle(&facility_bits, 1) <= 0)
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
 
        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        info = tl_info;
-       stsi(info, 15, 1, 2);
-
-       nr_cores = info->mag[NR_MAG - 2];
-       for (i = 0; i < info->mnest - 2; i++)
-               nr_cores *= info->mag[NR_MAG - 3 - i];
-
+       store_topology(info);
        pr_info("The CPU configuration topology of the machine is:");
        for (i = 0; i < NR_MAG; i++)
                printk(" %d", info->mag[i]);
        printk(" / %d\n", info->mnest);
-
-       core = &core_info;
-       for (i = 0; i < nr_cores; i++) {
-               core->next = alloc_bootmem(sizeof(struct core_info));
-               core = core->next;
-               if (!core)
-                       goto error;
-       }
-       return;
-error:
-       machine_has_topology = 0;
+       alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+       alloc_masks(info, &book_info, 3);
+#endif
 }
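
cpu_group_map() is the load-bearing piece of this rewrite: one walk over a singly linked list of masks now serves both the core and the book level, with the locking hoisted into update_cpu_core_map(). A plain-C model of the lookup, masks shrunk to a single unsigned long for brevity:

    /* Walk a linked list of masks and return the first one containing
     * the cpu, else a singleton mask for the cpu itself.
     */
    #include <stdio.h>

    struct mask_info {
            struct mask_info *next;
            unsigned long mask;
    };

    static unsigned long cpu_group_map(struct mask_info *info, unsigned cpu)
    {
            for (; info; info = info->next)
                    if (info->mask & (1UL << cpu))
                            return info->mask;
            return 1UL << cpu;              /* fallback: cpu by itself */
    }

    int main(void)
    {
            struct mask_info book1 = { NULL, 0x0cUL };   /* cpus 2,3 */
            struct mask_info book0 = { &book1, 0x03UL }; /* cpus 0,1 */

            printf("%#lx %#lx %#lx\n",
                   cpu_group_map(&book0, 1),    /* 0x3  */
                   cpu_group_map(&book0, 3),    /* 0xc  */
                   cpu_group_map(&book0, 5));   /* 0x20 */
            return 0;
    }
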
index 33990fa95af013a64e081c4bfc411f58228f64cb..35b6879628a04feccd1dd2071846cb92563f5586 100644 (file)
@@ -16,6 +16,7 @@ config SUPERH
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_ATTRS
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_KERNEL_GZIP
@@ -249,6 +250,11 @@ config ARCH_SHMOBILE
        select PM
        select PM_RUNTIME
 
+config CPU_HAS_PMU
+       depends on CPU_SH4 || CPU_SH4A
+       default y
+       bool
+
 if SUPERH32
 
 choice
@@ -738,6 +744,14 @@ config GUSA_RB
          LLSC, this should be more efficient than the other alternative of
          disabling interrupts around the atomic sequence.
 
+config HW_PERF_EVENTS
+       bool "Enable hardware performance counter support for perf events"
+       depends on PERF_EVENTS && CPU_HAS_PMU
+       default y
+       help
+         Enable hardware performance counter support for perf events. If
+         disabled, perf events will use software events only.
+
 source "drivers/sh/Kconfig"
 
 endmenu
index 3d0c9f36d15050bb049fe8cfdbe4e7ae73f08f6d..14308bed7ea510cb6b43429887ca727cf1f0510d 100644 (file)
@@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *);
 extern int reserve_pmc_hardware(void);
 extern void release_pmc_hardware(void);
 
-static inline void set_perf_event_pending(void)
-{
-       /* Nothing to see here, move along. */
-}
-
-#define PERF_EVENT_INDEX_OFFSET        0
-
 #endif /* __ASM_SH_PERF_EVENT_H */
index 43adddfe4c04b6d2eee9acfa8cad99a7788dfcc4..ae0be697a89e4b220f527a2bdb34f4da7ec7d318 100644 (file)
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
        int ret = 0;
 
        ret |= module_dwarf_finalize(hdr, sechdrs, me);
-       ret |= module_bug_finalize(hdr, sechdrs, me);
 
        return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
        module_dwarf_cleanup(mod);
 }
index a9dd3abde28e3f45bbd7d7654e8717c13aed8f34..d5ca1ef50fa9694a1942a8c304bd2c5aa69d9388 100644 (file)
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
        struct perf_callchain_entry *entry = data;
 
        if (reliable)
-               callchain_store(entry, addr);
+               perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
        .address        = callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->pc);
+       perf_callchain_store(entry, regs->pc);
 
        unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       /*
-        * Only the kernel side is implemented for now.
-        */
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
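
After this change the architecture only feeds program counters to perf_callchain_store(); the generic core owns the per-cpu entry buffers and the PERF_CONTEXT_KERNEL/USER markers that the deleted boilerplate used to emit. A sketch of the helper's contract (a guess at its shape, not the kernel's exact definition):

    #include <stdio.h>

    #define PERF_MAX_STACK_DEPTH 4          /* tiny, for the demo */

    struct perf_callchain_entry {
            unsigned nr;
            unsigned long ip[PERF_MAX_STACK_DEPTH];
    };

    static void perf_callchain_store(struct perf_callchain_entry *e,
                                     unsigned long ip)
    {
            if (e->nr < PERF_MAX_STACK_DEPTH)   /* silently drop overflow */
                    e->ip[e->nr++] = ip;
    }

    int main(void)
    {
            struct perf_callchain_entry e = { 0 };
            unsigned i;

            for (i = 0; i < 6; i++)             /* two stores get dropped */
                    perf_callchain_store(&e, 0x1000 + i);
            printf("captured %u frames\n", e.nr);  /* 4 */
            return 0;
    }
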
index 7a3dc356725839f2cf8579491efd8d02ba11b483..5a4b33435650c8ea108668d8e7e30786a20bd335 100644 (file)
@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
        return !!sh_pmu;
 }
 
+const char *perf_pmu_name(void)
+{
+       if (!sh_pmu)
+               return NULL;
+
+       return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+       if (!sh_pmu)
+               return 0;
+
+       return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -206,50 +224,80 @@ again:
        local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       clear_bit(idx, cpuc->active_mask);
-       sh_pmu->disable(hwc, idx);
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sh_pmu->disable(hwc, idx);
+               cpuc->events[idx] = NULL;
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+               sh_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
+
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 
-       barrier();
+       cpuc->events[idx] = event;
+       event->hw.state = 0;
+       sh_pmu->enable(hwc, idx);
+}
 
-       sh_perf_event_update(event, &event->hw, idx);
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       cpuc->events[idx] = NULL;
-       clear_bit(idx, cpuc->used_mask);
+       sh_pmu_stop(event, PERF_EF_UPDATE);
+       __clear_bit(event->hw.idx, cpuc->used_mask);
 
        perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
+       int ret = -EAGAIN;
+
+       perf_pmu_disable(event->pmu);
 
-       if (test_and_set_bit(idx, cpuc->used_mask)) {
+       if (__test_and_set_bit(idx, cpuc->used_mask)) {
                idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                if (idx == sh_pmu->num_events)
-                       return -EAGAIN;
+                       goto out;
 
-               set_bit(idx, cpuc->used_mask);
+               __set_bit(idx, cpuc->used_mask);
                hwc->idx = idx;
        }
 
        sh_pmu->disable(hwc, idx);
 
-       cpuc->events[idx] = event;
-       set_bit(idx, cpuc->active_mask);
-
-       sh_pmu->enable(hwc, idx);
+       event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (flags & PERF_EF_START)
+               sh_pmu_start(event, PERF_EF_RELOAD);
 
        perf_event_update_userpage(event);
-
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -257,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
        sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-       .enable         = sh_pmu_enable,
-       .disable        = sh_pmu_disable,
-       .read           = sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-       int err = __hw_perf_event_init(event);
+       int err;
+
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HW_CACHE:
+       case PERF_TYPE_HARDWARE:
+               err = __hw_perf_event_init(event);
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (unlikely(err)) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+       if (!sh_pmu_initialized())
+               return;
+
+       sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = sh_pmu_enable,
+       .pmu_disable    = sh_pmu_disable,
+       .event_init     = sh_pmu_event_init,
+       .add            = sh_pmu_add,
+       .del            = sh_pmu_del,
+       .start          = sh_pmu_start,
+       .stop           = sh_pmu_stop,
+       .read           = sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -299,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-       if (!sh_pmu_initialized())
-               return;
-
-       sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
        if (sh_pmu)
                return -EBUSY;
-       sh_pmu = pmu;
+       sh_pmu = _pmu;
 
-       pr_info("Performance Events: %s support registered\n", pmu->name);
+       pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-       WARN_ON(pmu->num_events > MAX_HWEVENTS);
+       WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(sh_pmu_notifier);
        return 0;
 }
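
The stop/start pair above is made idempotent by two bits in event->hw.state: PERF_HES_STOPPED gates the hardware disable and PERF_HES_UPTODATE gates the count read-back, so a del after an explicit stop does no redundant work. A toy model of that handshake:

    #include <stdio.h>

    #define HES_STOPPED  0x1
    #define HES_UPTODATE 0x2
    #define EF_UPDATE    0x1

    static unsigned state;

    static void stop(int flags)
    {
            if (!(state & HES_STOPPED)) {
                    puts("hw disable");
                    state |= HES_STOPPED;
            }
            if ((flags & EF_UPDATE) && !(state & HES_UPTODATE)) {
                    puts("sync count");
                    state |= HES_UPTODATE;
            }
    }

    int main(void)
    {
            stop(0);           /* hw disable only */
            stop(EF_UPDATE);   /* sync count only */
            stop(EF_UPDATE);   /* no output: fully stopped and synced */
            return 0;
    }
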
index 4886c5c1786c24dbacadccd492a9dbf5fa8bd075..e85aae73e3dcf12a4ef1bb06b344b901efd54c6f 100644 (file)
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
                oprofilefs.o oprofile_stats.o \
                timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y     := $(DRIVER_OBJS) common.o backtrace.o
index ac604937f3ee16fde018d6f5b0c094a189f4d2b5..e10d89376f9b79140adaa44f7fb63f0681604482 100644 (file)
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/perf_event.h>
 #include <asm/processor.h>
-#include "op_impl.h"
-
-static struct op_sh_model *model;
-
-static struct op_counter_config ctr[20];
 
+#ifdef CONFIG_HW_PERF_EVENTS
 extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-static int op_sh_setup(void)
-{
-       /* Pre-compute the values to stuff in the hardware registers.  */
-       model->reg_setup(ctr);
-
-       /* Configure the registers on all cpus.  */
-       on_each_cpu(model->cpu_setup, NULL, 1);
-
-        return 0;
-}
-
-static int op_sh_create_files(struct super_block *sb, struct dentry *root)
+char *op_name_from_perf_id(void)
 {
-       int i, ret = 0;
+       const char *pmu;
+       static char buf[20];    /* returned to the caller, must not be stack-local */
+       int size;
 
-       for (i = 0; i < model->num_counters; i++) {
-               struct dentry *dir;
-               char buf[4];
+       pmu = perf_pmu_name();
+       if (!pmu)
+               return NULL;
 
-               snprintf(buf, sizeof(buf), "%d", i);
-               dir = oprofilefs_mkdir(sb, root, buf);
+       size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
+       if (size > -1 && size < sizeof(buf))
+               return buf;
 
-               ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-               ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-               ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-               ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-               if (model->create_files)
-                       ret |= model->create_files(sb, dir);
-               else
-                       ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-
-               /* Dummy entries */
-               ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-       }
-
-       return ret;
+       return NULL;
 }
 
-static int op_sh_start(void)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-       /* Enable performance monitoring for all counters.  */
-       on_each_cpu(model->cpu_start, NULL, 1);
+       ops->backtrace = sh_backtrace;
 
-       return 0;
+       return oprofile_perf_init(ops);
 }
 
-static void op_sh_stop(void)
+void __exit oprofile_arch_exit(void)
 {
-       /* Disable performance monitoring for all counters.  */
-       on_each_cpu(model->cpu_stop, NULL, 1);
+       oprofile_perf_exit();
 }
-
+#else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-       struct op_sh_model *lmodel = NULL;
-       int ret;
-
-       /*
-        * Always assign the backtrace op. If the counter initialization
-        * fails, we fall back to the timer which will still make use of
-        * this.
-        */
-       ops->backtrace = sh_backtrace;
-
-       /*
-        * XXX
-        *
-        * All of the SH7750/SH-4A counters have been converted to perf,
-        * this infrastructure hook is left for other users until they've
-        * had a chance to convert over, at which point all of this
-        * will be deleted.
-        */
-
-       if (!lmodel)
-               return -ENODEV;
-       if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
-               return -ENODEV;
-
-       ret = lmodel->init();
-       if (unlikely(ret != 0))
-               return ret;
-
-       model = lmodel;
-
-       ops->setup              = op_sh_setup;
-       ops->create_files       = op_sh_create_files;
-       ops->start              = op_sh_start;
-       ops->stop               = op_sh_stop;
-       ops->cpu_type           = lmodel->cpu_type;
-
-       printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-              lmodel->cpu_type);
-
-       return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-       if (model && model->exit)
-               model->exit();
+       pr_info("oprofile: hardware counters not available\n");
+       return -ENODEV;
 }
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
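
The name-building guard in op_name_from_perf_id() is the standard snprintf truncation check: a negative return means an encoding error, a return of sizeof(buf) or more means the output was cut short. A standalone rendering (note the buffer must be static, since it is handed back to the caller):

    #include <stdio.h>

    static const char *make_name(const char *pmu)
    {
            static char buf[20];
            int size = snprintf(buf, sizeof(buf), "sh/%s", pmu);

            if (size > -1 && size < (int)sizeof(buf))
                    return buf;
            return NULL;                   /* truncated or failed */
    }

    int main(void)
    {
            printf("%s\n", make_name("sh4a"));                  /* sh/sh4a */
            printf("%p\n", (void *)make_name("name-far-too-long-to-fit"));
            return 0;                                           /* (nil)   */
    }
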
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h
deleted file mode 100644 (file)
index 1244479..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __OP_IMPL_H
-#define __OP_IMPL_H
-
-/* Per-counter configuration as set via oprofilefs.  */
-struct op_counter_config {
-       unsigned long enabled;
-       unsigned long event;
-
-       unsigned long count;
-
-       /* Dummy values for userspace tool compliance */
-       unsigned long kernel;
-       unsigned long user;
-       unsigned long unit_mask;
-};
-
-/* Per-architecture configury and hooks.  */
-struct op_sh_model {
-       void (*reg_setup)(struct op_counter_config *);
-       int (*create_files)(struct super_block *sb, struct dentry *dir);
-       void (*cpu_setup)(void *dummy);
-       int (*init)(void);
-       void (*exit)(void);
-       void (*cpu_start)(void *args);
-       void (*cpu_stop)(void *args);
-       char *cpu_type;
-       unsigned char num_counters;
-};
-
-/* arch/sh/oprofile/common.c */
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __OP_IMPL_H */
index 491e9d6de1912d56cea2b21f51bf8cfd7980e278..3e9d31401fb24e9dd1b6b3e67add09a4ea7f1468 100644 (file)
@@ -26,10 +26,12 @@ config SPARC
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select RTC_CLASS
        select RTC_DRV_M48T59
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
+       select HAVE_ARCH_JUMP_LABEL
 
 config SPARC32
        def_bool !64BIT
@@ -53,6 +55,7 @@ config SPARC64
        select RTC_DRV_BQ4802
        select RTC_DRV_SUN4V
        select RTC_DRV_STARFIRE
+       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
 
index 5016f76ea98a6f38510d3dc5a8593a1d871d4f53..6f57325bb883c5553b4781691cd699952bb90ff8 100644 (file)
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current_thread_info()->kregs;
        unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..62e66d7
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label)                                 \
+       do {                                                    \
+               asm goto("1:\n\t"                               \
+                        "nop\n\t"                              \
+                        "nop\n\t"                              \
+                        ".pushsection __jump_table,  \"a\"\n\t"\
+                        ".word 1b, %l[" #label "], %c0\n\t"    \
+                        ".popsection \n\t"                     \
+                        : :  "i" (key) :  : label);\
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif
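
Intended call shape for the JUMP_LABEL() macro, following the tracepoint pattern of this era: with the key disabled, the nops fall through to the fast path; enabling the key patches the site into a branch to the slow-path label. This is a usage sketch only: tracing_key and handle_trace() are hypothetical, and it compiles only on a sparc build with the header above.

    static int tracing_key;                 /* hypothetical key variable */

    static void trace_event(void)
    {
            JUMP_LABEL(&tracing_key, do_trace);
            return;                         /* fast path: two nops, fall through */
    do_trace:
            handle_trace();                 /* hypothetical slow path */
    }
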
index 727af70646cbddff92f36e6c47ad782c9018ea11..6e8bfa1786dab1f45d3dff5a1dcacc31a08d4844 100644 (file)
@@ -1,10 +1,6 @@
 #ifndef __ASM_SPARC_PERF_EVENT_H
 #define __ASM_SPARC_PERF_EVENT_H
 
-extern void set_perf_event_pending(void);
-
-#define        PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 #include <asm/ptrace.h>
 
index 0c2dc1f24a9a74adb05299a89ee07f8ed2bc1eb9..599398fbbc7cb78fd2f8849400d9092a95ffa786 100644 (file)
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT)    += $(audit--y)
 
 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
 obj-$(CONFIG_SPARC64)  += $(pc--y)
+
+obj-$(CONFIG_SPARC64)  += jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..ea2dafc
--- /dev/null
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       u32 val;
+       u32 *insn = (u32 *) (unsigned long) entry->code;
+
+       if (type == JUMP_LABEL_ENABLE) {
+               s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+               /* ba,pt %xcc, . + (off << 2) */
+               val = 0x10680000 | ((u32) off >> 2);
+#else
+               /* ba . + (off << 2) */
+               val = 0x10800000 | ((u32) off >> 2);
+#endif
+       } else {
+               val = 0x01000000;
+       }
+
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+       *insn = val;
+       flushi(insn);
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+       u32 *insn_p = (u32 *) (unsigned long) addr;
+
+       *insn_p = 0x01000000;
+       flushi(insn_p);
+}
+
+#endif
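
The enable case builds a sparc "ba,pt %xcc" (plain "ba" on 32-bit) whose displacement field counts instruction words, hence the shift of the byte offset by two. A runnable check of the encoding for a small positive offset (the 19-bit displacement limit and negative offsets are ignored here for brevity):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int32_t code = 0x1000, target = 0x1020;   /* illustrative addrs */
            int32_t off = target - code;              /* +32 bytes */
            uint32_t insn = 0x10680000 | ((uint32_t)off >> 2);

            printf("ba,pt %%xcc, .+%d -> %#x\n", off, insn); /* 0x10680008 */
            return 0;
    }
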
index f848aadf54dc1c2c1feb537752fdf3410efbe9d8..ee3c7dde8d9fbd5af21f5dbf555628eff1c64b9c 100644 (file)
@@ -18,6 +18,9 @@
 #include <asm/spitfire.h>
 
 #ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
 static void *module_map(unsigned long size)
 {
        struct vm_struct *area;
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
+       /* make jump label nops */
+       jump_label_apply_nops(me);
+
        /* Cheetah's I-cache is fully coherent.  */
        if (tlb_type == spitfire) {
                unsigned long va;
index c4a6a50b4849a64c0f98759921267fbe76c3cfe7..b87873c0e8ea5f72be8ffe784e1035911b05a68c 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/ftrace.h>
 
 #include <asm/pil.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 
        old_regs = set_irq_regs(regs);
        irq_enter();
-#ifdef CONFIG_PERF_EVENTS
-       perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+       irq_work_run();
 #endif
        irq_exit();
        set_irq_regs(old_regs);
 }
 
-void set_perf_event_pending(void)
+void arch_irq_work_raise(void)
 {
        set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
index 357ced3c33ffac87a992e01b6820a77084cfb8de..0d6deb55a2ae7e4189b5ab60aec81cd8df28adb6 100644 (file)
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
                enc = perf_event_get_enc(cpuc->events[i]);
                pcr &= ~mask_for_index(idx);
-               pcr |= event_encoding(enc, idx);
+               if (hwc->state & PERF_HES_STOPPED)
+                       pcr |= nop_for_index(idx);
+               else
+                       pcr |= event_encoding(enc, idx);
        }
 out:
        return pcr;
 }
 
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 pcr;
@@ -691,7 +694,7 @@ void hw_perf_enable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
@@ -710,19 +713,65 @@ void hw_perf_disable(void)
        pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+                             struct perf_event *event)
+{
+       int i;
+
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (cpuc->event[i] == event)
+                       break;
+       }
+       BUG_ON(i == cpuc->n_events);
+       return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               sparc_perf_event_set_period(event, &event->hw, idx);
+       }
+
+       event->hw.state = 0;
+
+       sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = active_event_index(cpuc, event);
+
+       if (!(event->hw.state & PERF_HES_STOPPED)) {
+               sparc_pmu_disable_event(cpuc, &event->hw, idx);
+               event->hw.state |= PERF_HES_STOPPED;
+       }
+
+       if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+               sparc_perf_event_update(event, &event->hw, idx);
+               event->hw.state |= PERF_HES_UPTODATE;
+       }
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
        int i;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event[i]) {
-                       int idx = cpuc->current_idx[i];
+                       /* Absorb the final count and turn off the
+                        * event.
+                        */
+                       sparc_pmu_stop(event, PERF_EF_UPDATE);
 
                        /* Shift remaining entries down into
                         * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
                                        cpuc->current_idx[i];
                        }
 
-                       /* Absorb the final count and turn off the
-                        * event.
-                        */
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
-                       barrier();
-                       sparc_perf_event_update(event, hwc, idx);
-
                        perf_event_update_userpage(event);
 
                        cpuc->n_events--;
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event)
                }
        }
 
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-                             struct perf_event *event)
-{
-       int i;
-
-       for (i = 0; i < cpuc->n_events; i++) {
-               if (cpuc->event[i] == event)
-                       break;
-       }
-       BUG_ON(i == cpuc->n_events);
-       return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
        sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       int idx = active_event_index(cpuc, event);
-       struct hw_perf_event *hwc = &event->hw;
-
-       sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
        if (!n_ev)
                return 0;
 
-       if (n_ev > perf_max_events)
+       if (n_ev > MAX_HWEVENTS)
                return -1;
 
        msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n0, ret = -EAGAIN;
        unsigned long flags;
 
        local_irq_save(flags);
-       perf_disable();
+       perf_pmu_disable(event->pmu);
 
        n0 = cpuc->n_events;
-       if (n0 >= perf_max_events)
+       if (n0 >= MAX_HWEVENTS)
                goto out;
 
        cpuc->event[n0] = event;
        cpuc->events[n0] = event->hw.event_base;
        cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+       event->hw.state = PERF_HES_UPTODATE;
+       if (!(ef_flags & PERF_EF_START))
+               event->hw.state |= PERF_HES_STOPPED;
+
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be peformed
@@ -1020,12 +1044,12 @@ nocheck:
 
        ret = 0;
 out:
-       perf_enable();
+       perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
 
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,16 +1062,37 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;
 
-       if (attr->type == PERF_TYPE_HARDWARE) {
+       switch (attr->type) {
+       case PERF_TYPE_HARDWARE:
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
-       } else if (attr->type == PERF_TYPE_HW_CACHE) {
+               break;
+
+       case PERF_TYPE_HW_CACHE:
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
-       } else
-               return -EOPNOTSUPP;
+               break;
+
+       case PERF_TYPE_RAW:
+               pmap = NULL;
+               break;
+
+       default:
+               return -ENOENT;
+
+       }
+
+       if (pmap) {
+               hwc->event_base = perf_event_encode(pmap);
+       } else {
+               /*
+                * User gives us "(encoding << 16) | pic_mask" for
+                * PERF_TYPE_RAW events.
+                */
+               hwc->event_base = attr->config;
+       }
 
        /* We save the enable bits in the config_base.  */
        hwc->config_base = sparc_pmu->irq_bit;
@@ -1058,12 +1103,10 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;
 
-       hwc->event_base = perf_event_encode(pmap);
-
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
-                                  perf_max_events - 1,
+                                  MAX_HWEVENTS - 1,
                                   evts, events, current_idx_dmy);
                if (n < 0)
                        return -EINVAL;
@@ -1099,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1111,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1123,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n;
@@ -1139,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
                return -EAGAIN;
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = sparc_pmu_enable,
-       .disable        = sparc_pmu_disable,
+static struct pmu pmu = {
+       .pmu_enable     = sparc_pmu_enable,
+       .pmu_disable    = sparc_pmu_disable,
+       .event_init     = sparc_pmu_event_init,
+       .add            = sparc_pmu_add,
+       .del            = sparc_pmu_del,
+       .start          = sparc_pmu_start,
+       .stop           = sparc_pmu_stop,
        .read           = sparc_pmu_read,
-       .unthrottle     = sparc_pmu_unthrottle,
        .start_txn      = sparc_pmu_start_txn,
        .cancel_txn     = sparc_pmu_cancel_txn,
        .commit_txn     = sparc_pmu_commit_txn,
 };
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       int err = __hw_perf_event_init(event);
-
-       if (err)
-               return ERR_PTR(err);
-       return &pmu;
-}
-
 void perf_event_print_debug(void)
 {
        unsigned long flags;
@@ -1236,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       sparc_pmu_disable_event(cpuc, hwc, idx);
+                       sparc_pmu_stop(event, 0);
        }
 
        return NOTIFY_STOP;
@@ -1277,28 +1318,21 @@ void __init init_hw_perf_events(void)
 
        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-       /* All sparc64 PMUs currently have 2 events.  */
-       perf_max_events = 2;
-
+       perf_pmu_register(&pmu);
        register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                          struct pt_regs *regs)
 {
        unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph = 0;
 #endif
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->tpc);
+       stack_trace_flush();
+
+       perf_callchain_store(entry, regs->tpc);
 
        ksp = regs->u_regs[UREG_I6];
        fp = ksp + STACK_BIAS;
@@ -1322,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = current->curr_ret_stack;
                        if (current->ret_stack && index >= graph) {
                                pc = current->ret_stack[index - graph].ret;
-                               callchain_store(entry, pc);
+                               perf_callchain_store(entry, pc);
                                graph++;
                        }
                }
@@ -1336,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
@@ -1355,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
@@ -1378,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-       if (!user_mode(regs)) {
-               stack_trace_flush();
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-       if (regs) {
-               flushw_user();
-               if (test_thread_flag(TIF_32BIT))
-                       perf_callchain_user_32(regs, entry);
-               else
-                       perf_callchain_user_64(regs, entry);
-       }
-       return entry;
+       flushw_user();
+       if (test_thread_flag(TIF_32BIT))
+               perf_callchain_user_32(entry, regs);
+       else
+               perf_callchain_user_64(entry, regs);
 }
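
The hunks above are the sparc side of this cycle's perf rework: the per-arch
hw_perf_event_init() hook and the hand-rolled perf_callchain() dispatcher go
away, the architecture registers its struct pmu once at init time, and the
callchain helpers shrink to pure unwinders.  The core now owns the per-cpu
entry buffers and the PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers, which is
why the callchain_store(entry, PERF_CONTEXT_*) lines disappear here.  Roughly,
the generic caller that replaced the removed dispatcher looks like this
(paraphrased from the core code, not quoted from this commit):

	entry->nr = 0;
	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		regs = current->mm ? task_pt_regs(current) : NULL;
	}
	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}
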
index ea22cd373c64f4bc371478d2b7be14f71cd32560..75fad425e249bc40559f98d14ead5699839bbbb8 100644 (file)
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                         int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+       unsigned long pstate, paddr;
+       pte_t *ptep, pte;
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+
+       /* Commit all stores of the instructions we are about to flush.  */
+       wmb();
+
+       /* Disable cross-call reception.  In this way even a very wide
+        * munmap() on another cpu can't tear down the page table
+        * hierarchy from underneath us, since that can't complete
+        * until the IPI tlb flush returns.
+        */
+
+       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+       __asm__ __volatile__("wrpr %0, %1, %%pstate"
+                               : : "r" (pstate), "i" (PSTATE_IE));
+
+       pgdp = pgd_offset(current->mm, address);
+       if (pgd_none(*pgdp))
+               goto out_irqs_on;
+       pudp = pud_offset(pgdp, address);
+       if (pud_none(*pudp))
+               goto out_irqs_on;
+       pmdp = pmd_offset(pudp, address);
+       if (pmd_none(*pmdp))
+               goto out_irqs_on;
+
+       ptep = pte_offset_map(pmdp, address);
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto out_unmap;
+
+       paddr = (unsigned long) page_address(pte_page(pte));
+
+       __asm__ __volatile__("flush     %0 + %1"
+                            : /* no outputs */
+                            : "r" (paddr),
+                              "r" (address & (PAGE_SIZE - 1))
+                            : "memory");
+
+out_unmap:
+       pte_unmap(ptep);
+out_irqs_on:
+       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
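
flush_signal_insns() consolidates the page-table walk that setup_frame32()
and setup_rt_frame32() used to open-code (the blocks removed further down),
and it strengthens the protection around the walk: the old copies relied on
preempt_disable() around the pte lookup, whereas the helper disables local
interrupts via %pstate, which per the comment above also blocks the
cross-call IPIs a remote munmap() would need to finish tearing the tables
down.  Both frame setters reduce to the same pattern, sketched here with
put_insns() standing in for the actual __put_user() sequence:

	err |= put_insns(sf);		/* copy trampoline to the user stack */
	if (err)
		goto sigsegv;
	flush_signal_insns((unsigned long)&sf->insns[0]);
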
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                        int signo, sigset_t *oldset)
 {
        struct signal_frame32 __user *sf;
        int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer) {
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        } else {
-               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
-               pgd_t *pgdp = pgd_offset(current->mm, address);
-               pud_t *pudp = pud_offset(pgdp, address);
-               pmd_t *pmdp = pmd_offset(pudp, address);
-               pte_t *ptep;
-               pte_t pte;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               preempt_disable();
-               ptep = pte_offset_map(pmdp, address);
-               pte = *ptep;
-               if (pte_present(pte)) {
-                       unsigned long page = (unsigned long)
-                               page_address(pte_page(pte));
-
-                       wmb();
-                       __asm__ __volatile__("flush     %0 + %1"
-                                            : /* no outputs */
-                                            : "r" (page),
-                                              "r" (address & (PAGE_SIZE - 1))
-                                            : "memory");
-               }
-               pte_unmap(ptep);
-               preempt_enable();
+               flush_signal_insns(address);
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                            unsigned long signr, sigset_t *oldset,
-                            siginfo_t *info)
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                           unsigned long signr, sigset_t *oldset,
+                           siginfo_t *info)
 {
        struct rt_signal_frame32 __user *sf;
        int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        else {
-               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
-               pgd_t *pgdp = pgd_offset(current->mm, address);
-               pud_t *pudp = pud_offset(pgdp, address);
-               pmd_t *pmdp = pmd_offset(pudp, address);
-               pte_t *ptep;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               preempt_disable();
-               ptep = pte_offset_map(pmdp, address);
-               if (pte_present(*ptep)) {
-                       unsigned long page = (unsigned long)
-                               page_address(pte_page(*ptep));
-
-                       wmb();
-                       __asm__ __volatile__("flush     %0 + %1"
-                                            : /* no outputs */
-                                            : "r" (page),
-                                              "r" (address & (PAGE_SIZE - 1))
-                                            : "memory");
-               }
-               pte_unmap(ptep);
-               preempt_enable();
+               flush_signal_insns(address);
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signr, current);
+       return -EFAULT;
 }
 
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
-                                  siginfo_t *info,
-                                  sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+                                 siginfo_t *info,
+                                 sigset_t *oldset, struct pt_regs *regs)
 {
+       int err;
+
        if (ka->sa.sa_flags & SA_SIGINFO)
-               setup_rt_frame32(ka, regs, signr, oldset, info);
+               err = setup_rt_frame32(ka, regs, signr, oldset, info);
        else
-               setup_frame32(ka, regs, signr, oldset);
+               err = setup_frame32(ka, regs, signr, oldset);
+
+       if (err)
+               return err;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
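
This is where the int return of the frame setters pays off: previously a
failed setup_frame32()/setup_rt_frame32() still fell through, do_signal32()
unconditionally cleared TS_RESTORE_SIGMASK, and tracehook_signal_handler()
fired for a frame that was never written.  Now the tracehook call sits behind
the error check and the caller only clears the flag on success; the sparc32
and sparc64 paths get the identical conversion later in this diff.  Condensed
(frame_setter() stands for either setup routine):

	err = frame_setter(ka, regs, signr, oldset /*, info */);
	if (err)
		return err;	/* saved sigmask stays pending for a retry */
	/* ... block the signal under siglock ... */
	tracehook_signal_handler(signr, info, ka, regs, 0);
	return 0;
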
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart32(orig_i0, regs, &ka.sa);
-               handle_signal32(signr, &ka, &info, oldset, regs);
-
-               /* A signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TS_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+                       /* A signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TS_RESTORE_SIGMASK flag.
+                        */
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               }
                return;
        }
        if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
index 9882df92ba0a2c8b8da4639f7e181214930c8ed6..5e5c5fd03783c997f5c344025e8f4784182a0ddc 100644 (file)
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                       int signo, sigset_t *oldset)
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                      int signo, sigset_t *oldset)
 {
        struct signal_frame __user *sf;
        int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return;
+       return 0;
 
 sigill_and_return:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                          int signo, sigset_t *oldset, siginfo_t *info)
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                         int signo, sigset_t *oldset, siginfo_t *info)
 {
        struct rt_signal_frame __user *sf;
        int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static inline void
+static inline int
 handle_signal(unsigned long signr, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+       int err;
+
        if (ka->sa.sa_flags & SA_SIGINFO)
-               setup_rt_frame(ka, regs, signr, oldset, info);
+               err = setup_rt_frame(ka, regs, signr, oldset, info);
        else
-               setup_frame(ka, regs, signr, oldset);
+               err = setup_frame(ka, regs, signr, oldset);
+
+       if (err)
+               return err;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked, signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               handle_signal(signr, &ka, &info, oldset, regs);
-
-               /* a signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TIF_RESTORE_SIGMASK flag.
-                */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag.
+                        */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               }
                return;
        }
        if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->pc -= 4;
                regs->npc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* if there's no signal to deliver, we just put the saved sigmask
index 9fa48c30037e5356c2f686be695ea8bcfb3613f3..006fe4515886dc6ae2a7a8e6cc9b6df9c16fda46 100644 (file)
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
        return (void __user *) sp;
 }
 
-static inline void
+static inline int
 setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
               int signo, sigset_t *oldset, siginfo_t *info)
 {
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
        }
        /* 4. return to kernel instructions */
        regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
-                                siginfo_t *info,
-                                sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+                               siginfo_t *info,
+                               sigset_t *oldset, struct pt_regs *regs)
 {
-       setup_rt_frame(ka, regs, signr, oldset,
-                      (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+       int err;
+
+       err = setup_rt_frame(ka, regs, signr, oldset,
+                            (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+       if (err)
+               return err;
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NOMASK))
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               handle_signal(signr, &ka, &info, oldset, regs);
-
-               /* A signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TS_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+                       /* A signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TS_RESTORE_SIGMASK flag.
+                        */
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               }
                return;
        }
        if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
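
The pt_regs_clear_syscall() calls added to all three do_signal() variants
close a double-restart hole: once tpc/tnpc (or pc/npc) have been wound back
to re-issue the syscall, the trap frame must stop advertising "interrupted
syscall", otherwise a second pass through signal handling before the task
returns to user space would wind the PC back again and restart the call
twice.  The helper itself is arch-private; conceptually it just drops the
syscall marker from the saved registers, along these lines (illustrative
only, the real sparc definition and flag name differ):

	static inline void pt_regs_clear_syscall(struct pt_regs *regs)
	{
		regs->flags &= ~PT_REGS_FLAG_SYSCALL;	/* hypothetical flag */
	}
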
index 50794137d710d71bfa197cbb055d14377f2cd770..675c9e11ada5541085e2fd6e6129272c05713df4 100644 (file)
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
 {
        siginfo_t info;
 
-       lock_kernel();
 #ifdef DEBUG_SPARC_BREAKPOINT
         printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
 #endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
 #ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
 #endif
-       unlock_kernel();
 }
 
 asmlinkage int
index f8514e291e1559ecda6994ff5be883cc6805128c..12b9f352595f44e26c3f5730d3a84e8557d8cca8 100644 (file)
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        enum direction dir;
 
-       lock_kernel();
        if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
           (((insn >> 30) & 3) != 3))
                goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 kill_user:
        user_mna_trap_fault(regs, insn);
 out:
-       unlock_kernel();
+       ;
 }
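
The lone ";" left behind is not noise: "out:" is now the last thing in
user_unaligned_trap(), and a C label must be attached to a statement (only
C23 relaxed this), so removing unlock_kernel() means an empty statement has
to take its place:

	kill_user:
		user_mna_trap_fault(regs, insn);
	out:
		;	/* label needs a statement, even an empty one */
	}
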
index f24d298bda29d1ca0457b0b7060a366ace463756..b351770cbdd6aded05ca53554b30c8716a24a4ce 100644 (file)
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
        struct thread_info *tp = current_thread_info();
        int window;
 
-       lock_kernel();
        flush_user_windows();
        for(window = 0; window < tp->w_saved; window++) {
                unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
                        do_exit(SIGILL);
        }
        tp->w_saved = 0;
-       unlock_kernel();
 }
index 1246573be59ee78b872ed5d8fa5d583cf16860ac..261aaba092d4bb424d336caa645b222a1fdb1068 100644 (file)
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 0
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 
index e864c47fc89cce6cd7c366784e8d76d22a43e280..70017699a74ce0e37e30d0c7c8e62865e9577e3b 100644 (file)
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 1
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 
index 5a34da6cdd79a18802d437056393c41d6e2f6fba..8b60ec8b2d194f6e352df18eba598e3a61ef52f3 100644 (file)
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
        return (long)(int)(long __force)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->sp - len;
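
The rename is the per-arch half of the compat_alloc_user_space() hardening:
the unprefixed name moves into generic code, which sanity-checks the result
with access_ok() before handing it out, so a corrupted or attacker-chosen
stack pointer can no longer yield a scratch buffer outside the user address
space.  The generic wrapper, paraphrased (it is not part of this hunk):

	void __user *compat_alloc_user_space(unsigned long len)
	{
		void __user *ptr = arch_compat_alloc_user_space(len);

		if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
			return NULL;
		return ptr;
	}
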
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
 struct compat_sigaction;
 struct compat_siginfo;
 struct compat_sigaltstack;
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                      compat_uptr_t __user *envp);
+long compat_sys_execve(const char __user *path,
+                      const compat_uptr_t __user *argv,
+                      const compat_uptr_t __user *envp);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize);
index 8c95bef3fa45a185a1ec5610bc7a1c21e6f96c13..ee43328713abf2ace9f76e885e9e26ce7b615b3a 100644 (file)
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void *memcpy_fromio(void *dst, void *src, int len)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+                                size_t len)
 {
        int x;
        BUG_ON((unsigned long)src & 0x3);
        for (x = 0; x < len; x += 4)
                *(u32 *)(dst + x) = readl(src + x);
-       return dst;
 }
 
-static inline void *memcpy_toio(void *dst, void *src, int len)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+                               size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
        for (x = 0; x < len; x += 4)
                writel(*(u32 *)(src + x), dst + x);
-       return dst;
 }
 
 /*
index d942d09b252e4012d251d6d09eaa1af6c7df7c3f..ccd5f84256886c7526edd78bc58ab621f14ef2ac 100644 (file)
@@ -103,6 +103,18 @@ struct thread_struct {
        /* Any other miscellaneous processor state bits */
        unsigned long proc_status;
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       /* Interrupt base for PL0 interrupts */
+       unsigned long interrupt_vector_base;
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+       /* Tile cache retry fifo high-water mark */
+       unsigned long tile_rtf_hwm;
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       /* Data stream prefetch control */
+       unsigned long dstream_pf;
+#endif
 #ifdef CONFIG_HARDWALL
        /* Is this task tied to an activated hardwall? */
        struct hardwall_info *hardwall;
index acdae814e0161b967f6289e265f6aad1516b2aac..4a02bb07397993a2eb025ae7cd79caf618ae03d6 100644 (file)
@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t;
 
 /*
  * This struct defines the way the registers are stored on the stack during a
- * system call/exception.  It should be a multiple of 8 bytes to preserve
- * normal stack alignment rules.
- *
- * Must track <sys/ucontext.h> and <sys/procfs.h>
+ * system call or exception.  "struct sigcontext" has the same shape.
  */
 struct pt_regs {
        /* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@ struct pt_regs {
 
 #endif /* __ASSEMBLY__ */
 
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
-
 #define PTRACE_GETREGS         12
 #define PTRACE_SETREGS         13
 #define PTRACE_GETFPREGS       14
@@ -101,6 +93,11 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
+
 #ifndef __ASSEMBLY__
 
 #define instruction_pointer(regs) ((regs)->pc)
index 7cd7672e3ad4043072a6b6ac9c393491426f08a1..5e2d03336f5335ae99151a383a855e5db7cae6b0 100644 (file)
 #ifndef _ASM_TILE_SIGCONTEXT_H
 #define _ASM_TILE_SIGCONTEXT_H
 
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
-
-/* Must track <sys/ucontext.h> */
+#include <arch/abi.h>
 
+/*
+ * struct sigcontext has the same shape as struct pt_regs,
+ * but is simplified since we know the fault is from userspace.
+ */
 struct sigcontext {
-       struct pt_regs regs;
+       uint_reg_t gregs[53];   /* General-purpose registers.  */
+       uint_reg_t tp;          /* Aliases gregs[TREG_TP].  */
+       uint_reg_t sp;          /* Aliases gregs[TREG_SP].  */
+       uint_reg_t lr;          /* Aliases gregs[TREG_LR].  */
+       uint_reg_t pc;          /* Program counter.  */
+       uint_reg_t ics;         /* In Interrupt Critical Section?  */
+       uint_reg_t faultnum;    /* Fault number.  */
+       uint_reg_t pad[5];
 };
 
 #endif /* _ASM_TILE_SIGCONTEXT_H */
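
struct sigcontext now mirrors struct pt_regs field-for-field instead of
embedding it, so the user-visible header no longer has to pull in
<asm/ptrace.h>; the BUILD_BUG_ONs added to restore_sigcontext() further down
pin the two layouts together.  For a signal handler the interesting registers
become named slots; a minimal user-side sketch, assuming the conventional
ucontext embedding of sigcontext:

	#include <signal.h>
	#include <ucontext.h>

	static void handler(int sig, siginfo_t *info, void *v)
	{
		ucontext_t *uc = v;
		/* pc/sp/lr are first-class fields of the new layout */
		unsigned long fault_pc = uc->uc_mcontext.pc;
		unsigned long fault_sp = uc->uc_mcontext.sp;
		(void)fault_pc; (void)fault_sp;
	}
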
index eb0253f32202c0b52b65012b7c4a8c839aae47dc..c1ee1d61d44ca8a07c504b9af425df3a9c7185c7 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm-generic/signal.h>
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+struct pt_regs;
 int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
 void do_signal(struct pt_regs *regs);
index af165a74537f84ff1f377ce22951848bba58d9e0..ce99ffefeacff351c5523d574581e1092d57ca06 100644 (file)
@@ -62,10 +62,12 @@ long sys_fork(void);
 long _sys_fork(struct pt_regs *regs);
 long sys_vfork(void);
 long _sys_vfork(struct pt_regs *regs);
-long sys_execve(char __user *filename, char __user * __user *argv,
-               char __user * __user *envp);
-long _sys_execve(char __user *filename, char __user * __user *argv,
-                char __user * __user *envp, struct pt_regs *regs);
+long sys_execve(const char __user *filename,
+               const char __user *const __user *argv,
+               const char __user *const __user *envp);
+long _sys_execve(const char __user *filename,
+                const char __user *const __user *argv,
+                const char __user *const __user *envp, struct pt_regs *regs);
 
 /* kernel/signal.c */
 long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
 #endif
 
 #ifdef CONFIG_COMPAT
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                      compat_uptr_t __user *envp);
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                       compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_execve(const char __user *path,
+                      const compat_uptr_t __user *argv,
+                      const compat_uptr_t __user *envp);
+long _compat_sys_execve(const char __user *path,
+                       const compat_uptr_t __user *argv,
+                       const compat_uptr_t __user *envp,
+                       struct pt_regs *regs);
 long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
                            struct compat_sigaltstack __user *uoss_ptr);
 long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
index 84f296ca9e63c85da528ee9a8d403cc8362c4097..8f58bdff20d7f7dd9b77d16a6901eb9db82f056b 100644 (file)
@@ -1506,13 +1506,6 @@ handle_ill:
        }
        STD_ENDPROC(handle_ill)
 
-       .pushsection .rodata, "a"
-       .align  8
-bpt_code:
-       bpt
-       ENDPROC(bpt_code)
-       .popsection
-
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
index 985cc28c74c5696f91adcf82d6347439999bfa41..84c29111756c2212f02cd5bc841691c1cc0a0fd9 100644 (file)
@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+       t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
+#endif
 }
 
 static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
+#endif
 #if CHIP_HAS_TILE_RTF_HWM()
-       /*
-        * Clear this whenever we switch back to a process in case
-        * the previous process was monkeying with it.  Even if enabled
-        * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
-        * performance hint, so isn't worth a full save/restore.
-        */
-       __insn_mtspr(SPR_TILE_RTF_HWM, 0);
+       __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
 #endif
 }
 
@@ -561,8 +570,9 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                       compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(const char __user *path,
+                       const compat_uptr_t __user *argv,
+                       const compat_uptr_t __user *envp, struct pt_regs *regs)
 {
        long error;
        char *filename;
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs)
               regs->regs[51], regs->regs[52], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
-       for (i = 0; i < 52; i += 3)
+       for (i = 0; i < 52; i += 4)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
index 45b66a3c991ffbc79a7f93e3f00543e50b20de8a..ce183aa1492c7abfaadfca95d54ec76bddc11455 100644 (file)
@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs,
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+       /*
+        * Enforce that sigcontext is like pt_regs, and doesn't mess
+        * up our stack alignment rules.
+        */
+       BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
+       BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
+
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-               err |= __get_user(((long *)regs)[i],
-                                 &((long __user *)(&sc->regs))[i]);
+               err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
        regs->faultnum = INT_SWINT_1_SIGRETURN;
 
-       err |= __get_user(*pr0, &sc->regs.regs[0]);
+       err |= __get_user(*pr0, &sc->gregs[0]);
        return err;
 }
 
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
        int i, err = 0;
 
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-               err |= __put_user(((long *)regs)[i],
-                                 &((long __user *)(&sc->regs))[i]);
+               err |= __put_user(regs->regs[i], &sc->gregs[i]);
 
        return err;
 }
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * Set up registers for signal handler.
         * Registers that we don't modify keep the value they had from
         * user-space at the time we took the signal.
+        * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+        * since some things rely on this (e.g. glibc's debug/segfault.c).
         */
        regs->pc = (unsigned long) ka->sa.sa_handler;
        regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
        regs->sp = (unsigned long) frame;
        regs->lr = restorer;
        regs->regs[0] = (unsigned long) usig;
-
-       if (ka->sa.sa_flags & SA_SIGINFO) {
-               /* Need extra arguments, so mark to restore caller-saves. */
-               regs->regs[1] = (unsigned long) &frame->info;
-               regs->regs[2] = (unsigned long) &frame->uc;
-               regs->flags |= PT_FLAGS_CALLER_SAVES;
-       }
+       regs->regs[1] = (unsigned long) &frame->info;
+       regs->regs[2] = (unsigned long) &frame->uc;
+       regs->flags |= PT_FLAGS_CALLER_SAVES;
 
        /*
         * Notify any tracer that was single-stepping it.
index 38a68b0b45813474a791f18aabaa35eb33b8a38a..ea2e0ce28380a2d3fa59391cfc02cb4ca0022f1e 100644 (file)
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
                        pr_err("  <received signal %d>\n",
                               frame->info.si_signo);
                }
-               return &frame->uc.uc_mcontext.regs;
+               return (struct pt_regs *)&frame->uc.uc_mcontext;
        }
        return NULL;
 }
index 0c46e398cd8f313d89a3ff07187916aa6021b93f..63c740a85b4cca0ed9333091593885cbdfface2e 100644 (file)
@@ -40,6 +40,11 @@ static char *mixer = HOSTAUDIO_DEV_MIXER;
 "    This is used to specify the host mixer device to the hostaudio driver.\n"\
 "    The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
 
+module_param(dsp, charp, 0644);
+MODULE_PARM_DESC(dsp, DSP_HELP);
+module_param(mixer, charp, 0644);
+MODULE_PARM_DESC(mixer, MIXER_HELP);
+
 #ifndef MODULE
 static int set_dsp(char *name, int *add)
 {
@@ -56,15 +61,6 @@ static int set_mixer(char *name, int *add)
 }
 
 __uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
-
-#else /*MODULE*/
-
-module_param(dsp, charp, 0644);
-MODULE_PARM_DESC(dsp, DSP_HELP);
-
-module_param(mixer, charp, 0644);
-MODULE_PARM_DESC(mixer, MIXER_HELP);
-
 #endif
 
 /* /dev/dsp file operations */
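
Moving the module_param()/MODULE_PARM_DESC() pairs out of the #else branch
makes the parameters visible in the built-in case as well, not only when
hostaudio is a module; the __uml_setup() handlers remain for UML's own
"dsp="/"mixer=" command-line spellings.  With generic parameter handling,
both of the following should work (assuming the object is named hostaudio;
the device paths are just example values):

	modprobe hostaudio dsp=/dev/sound/dsp mixer=/dev/sound/mixer
	# or, built in, on the kernel command line:
	hostaudio.dsp=/dev/sound/dsp hostaudio.mixer=/dev/sound/mixer
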
index 2ab233ba32c1564f8323884017108b0d53978366..47d0c37897d5874d0bfb95d3d2b87441bd2df6a0 100644 (file)
@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-static int uml_net_set_mac(struct net_device *dev, void *addr)
-{
-       struct uml_net_private *lp = netdev_priv(dev);
-       struct sockaddr *hwaddr = addr;
-
-       spin_lock_irq(&lp->lock);
-       eth_mac_addr(dev, hwaddr->sa_data);
-       spin_unlock_irq(&lp->lock);
-
-       return 0;
-}
-
 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 {
        dev->mtu = new_mtu;
@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = {
        .ndo_start_xmit         = uml_net_start_xmit,
        .ndo_set_multicast_list = uml_net_set_multicast_list,
        .ndo_tx_timeout         = uml_net_tx_timeout,
-       .ndo_set_mac_address    = uml_net_set_mac,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = uml_net_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
 };
@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac,
            ((*transport->user->init)(&lp->user, dev) != 0))
                goto out_unregister;
 
-       eth_mac_addr(dev, device->mac);
+       /* don't use eth_mac_addr, it will not work here */
+       memcpy(dev->dev_addr, device->mac, ETH_ALEN);
        dev->mtu = transport->user->mtu;
        dev->netdev_ops = &uml_netdev_ops;
        dev->ethtool_ops = &uml_net_ethtool_ops;
index 1bcd208c459f609ab3634f951e1c11b3b2e50038..9734994cba1e86c53f60dead72952f7feb3ab02d 100644 (file)
@@ -163,6 +163,7 @@ struct ubd {
        struct scatterlist sg[MAX_SG];
        struct request *request;
        int start_sg, end_sg;
+       sector_t rq_pos;
 };
 
 #define DEFAULT_COW { \
@@ -187,6 +188,7 @@ struct ubd {
        .request =              NULL, \
        .start_sg =             0, \
        .end_sg =               0, \
+       .rq_pos =               0, \
 }
 
 /* Protected by ubd_lock */
@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
 {
        struct io_thread_req *io_req;
        struct request *req;
-       sector_t sector;
        int n;
 
        while(1){
@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
                                return;
 
                        dev->request = req;
+                       dev->rq_pos = blk_rq_pos(req);
                        dev->start_sg = 0;
                        dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
                }
 
                req = dev->request;
-               sector = blk_rq_pos(req);
                while(dev->start_sg < dev->end_sg){
                        struct scatterlist *sg = &dev->sg[dev->start_sg];
 
@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
                                return;
                        }
                        prepare_request(req, io_req,
-                                       (unsigned long long)sector << 9,
+                                       (unsigned long long)dev->rq_pos << 9,
                                        sg->offset, sg->length, sg_page(sg));
 
-                       sector += sg->length >> 9;
                        n = os_write_file(thread_fd, &io_req,
                                          sizeof(struct io_thread_req *));
                        if(n != sizeof(struct io_thread_req *)){
@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
                                return;
                        }
 
+                       dev->rq_pos += sg->length >> 9;
                        dev->start_sg++;
                }
                dev->end_sg = 0;
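
The new rq_pos field fixes resumed-request accounting: do_ubd_request() can
return early when the io-thread pipe is full and be re-entered later for the
same struct request, but the old code recomputed its position from
blk_rq_pos(req) on every entry, so a resumed pass re-submitted sectors from
the start of the request.  Keeping the cursor in the per-device struct, and
advancing it only after an sg element has been handed off, lets re-entry pick
up exactly where the previous pass stopped.  The core of the pattern, with
submit_io() standing in for the prepare_request()/os_write_file() pair:

	while (dev->start_sg < dev->end_sg) {
		struct scatterlist *sg = &dev->sg[dev->start_sg];

		if (submit_io(dev, (u64)dev->rq_pos << 9, sg) < 0)
			return;	/* rq_pos/start_sg still mark the resume point */
		dev->rq_pos += sg->length >> 9;
		dev->start_sg++;
	}
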
index cd145eda357950b66b1ad2f05f55bd0094df6006..49b5e1eb32622abbdab4b3631207460bc80a1e14 100644 (file)
@@ -62,7 +62,7 @@ static long execve1(const char *file,
        return error;
 }
 
-long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
+long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
 {
        long err;
 
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
        return err;
 }
 
-long sys_execve(const char __user *file, char __user *__user *argv,
-               char __user *__user *env)
+long sys_execve(const char __user *file, const char __user *const __user *argv,
+               const char __user *const __user *env)
 {
        long error;
        char *filename;
index 1303a105fe91dc5aabca314e4e94faf8573b9c65..5bf97db24a046283f8704e2f7e0ee91e7b4d4844 100644 (file)
@@ -1 +1 @@
-extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
+extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
index 5ddb246626dbb87afe7484b87c0c495c8643de8f..f958cb876ee3d3e47ddff71094e8026c0a110f5d 100644 (file)
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
 
        fs = get_fs();
        set_fs(KERNEL_DS);
-       ret = um_execve(filename, (char __user *__user *)argv,
-                       (char __user *__user *) envp);
+       ret = um_execve(filename, (const char __user *const __user *)argv,
+                       (const char __user *const __user *) envp);
        set_fs(fs);
 
        return ret;
index cea0cd9a316fb987bfa611a1dffa06cdba1f0332..8c9e609a175b1d91865176ac0d2ce5d5a2fb0034 100644 (file)
@@ -25,6 +25,7 @@ config X86
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS if (!M386 && !M486)
+       select HAVE_IRQ_WORK
        select HAVE_IOREMAP_PROT
        select HAVE_KPROBES
        select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -33,6 +34,7 @@ config X86
        select HAVE_KRETPROBES
        select HAVE_OPTPROBES
        select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_C_RECORDMCOUNT
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
@@ -59,6 +61,8 @@ config X86
        select ANON_INODES
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_TEXT_POKE_SMP
 
 config INSTRUCTION_DECODER
        def_bool (KPROBES || PERF_EVENTS)
@@ -517,25 +521,6 @@ if PARAVIRT_GUEST
 
 source "arch/x86/xen/Kconfig"
 
-config VMI
-       bool "VMI Guest support (DEPRECATED)"
-       select PARAVIRT
-       depends on X86_32
-       ---help---
-         VMI provides a paravirtualized interface to the VMware ESX server
-         (it could be used by other hypervisors in theory too, but is not
-         at the moment), by linking the kernel to a GPL-ed ROM module
-         provided by the hypervisor.
-
-         As of September 2009, VMware has started a phased retirement
-         of this feature from VMware's products. Please see
-         feature-removal-schedule.txt for details.  If you are
-         planning to enable this option, please note that you cannot
-         live migrate a VMI enabled VM to a future VMware product,
-         which doesn't support VMI. So if you expect your kernel to
-         seamlessly migrate to newer VMware products, keep this
-         disabled.
-
 config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
@@ -670,7 +655,7 @@ config GART_IOMMU
        bool "GART IOMMU support" if EMBEDDED
        default y
        select SWIOTLB
-       depends on X86_64 && PCI && K8_NB
+       depends on X86_64 && PCI && AMD_NB
        ---help---
          Support for full DMA access of devices with 32bit memory access only
          on systems with more than 3GB. This is usually needed for USB,
@@ -795,6 +780,17 @@ config SCHED_MC
          making when dealing with multi-core CPU chips at a cost of slightly
          increased overhead in some places. If unsure say N here.
 
+config IRQ_TIME_ACCOUNTING
+       bool "Fine granularity task level IRQ time accounting"
+       default n
+       ---help---
+         Select this option to enable fine granularity task irq time
+         accounting. This is done by reading a timestamp on each
+         transition between softirq and hardirq state, so there can be a
+         small performance impact.
+
+         If in doubt, say N here.
+
 source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
@@ -1148,6 +1144,9 @@ config X86_PAE
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool X86_64 || X86_PAE
 
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool X86_64 || HIGHMEM64G
+
 config DIRECT_GBPAGES
        bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
        default y
@@ -1326,25 +1325,34 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
          Set whether the default state of memory_corruption_check is
          on or off.
 
-config X86_RESERVE_LOW_64K
-       bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
-       default y
+config X86_RESERVE_LOW
+       int "Amount of low memory, in kilobytes, to reserve for the BIOS"
+       default 64
+       range 4 640
        ---help---
-         Reserve the first 64K of physical RAM on BIOSes that are known
-         to potentially corrupt that memory range. A numbers of BIOSes are
-         known to utilize this area during suspend/resume, so it must not
-         be used by the kernel.
+         Specify the amount of low memory to reserve for the BIOS.
+
+         The first page contains BIOS data structures that the kernel
+         must not use, so that page must always be reserved.
+
+         By default we reserve the first 64K of physical RAM, as a
+         number of BIOSes are known to corrupt that memory range
+         during events such as suspend/resume or monitor cable
+         insertion, so it must not be used by the kernel.
 
-         Set this to N if you are absolutely sure that you trust the BIOS
-         to get all its memory reservations and usages right.
+         You can set this to 4 if you are absolutely sure that you
+         trust the BIOS to get all its memory reservations and usages
+         right.  If you know your BIOS has problems beyond the
+         default 64K area, you can set this to 640 to avoid using the
+         entire low memory range.
 
-         If you have doubts about the BIOS (e.g. suspend/resume does not
-         work or there's kernel crashes after certain hardware hotplug
-         events) and it's not AMI or Phoenix, then you might want to enable
-         X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
-         corruption patterns.
+         If you have doubts about the BIOS (e.g. suspend/resume does
+         not work or there are kernel crashes after certain hardware
+         hotplug events) then you might want to enable
+         X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
+         typical corruption patterns.
 
-         Say Y if unsure.
+         Leave this to the default value of 64 if you are unsure.
 
 config MATH_EMULATION
        bool
@@ -1900,7 +1908,7 @@ config PCI_GODIRECT
        bool "Direct"
 
 config PCI_GOOLPC
-       bool "OLPC"
+       bool "OLPC XO-1"
        depends on OLPC
 
 config PCI_GOANY
@@ -2061,14 +2069,21 @@ config SCx200HR_TIMER
 config OLPC
        bool "One Laptop Per Child support"
        select GPIOLIB
+       select OLPC_OPENFIRMWARE
        ---help---
          Add support for detecting the unique features of the OLPC
          XO hardware.
 
+config OLPC_XO1
+       tristate "OLPC XO-1 support"
+       depends on OLPC && PCI
+       ---help---
+         Add support for non-essential features of the OLPC XO-1 laptop.
+
 config OLPC_OPENFIRMWARE
        bool "Support for OLPC's Open Firmware"
        depends on !X86_64 && !X86_PAE
-       default y if OLPC
+       default n
        help
          This option adds support for the implementation of Open Firmware
          that is used on the OLPC XO-1 Children's Machine.
@@ -2076,7 +2091,7 @@ config OLPC_OPENFIRMWARE
 
 endif # X86_32
 
-config K8_NB
+config AMD_NB
        def_bool y
        depends on CPU_SUP_AMD && PCI
 
@@ -2125,6 +2140,10 @@ config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
 
+config HAVE_TEXT_POKE_SMP
+       bool
+       select STOP_MACHINE if SMP
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
index 75085080b63e2f74d32d4fa2b4bd2f8972d2b7de..e5bb96b10f1af5bad62466789d7f2d10e389edca 100644 (file)
@@ -43,6 +43,10 @@ config EARLY_PRINTK
           with klogd/syslogd or the X server. You should normally say N here,
          unless you want to debug such a crash.
 
+config EARLY_PRINTK_MRST
+       bool "Early printk for MRST platform support"
+       depends on EARLY_PRINTK && X86_MRST
+
 config EARLY_PRINTK_DBGP
        bool "Early printk via EHCI debug port"
        depends on EARLY_PRINTK && PCI
index 8aa1b59b9074586e1fe9930b85d0cf945f14a695..b02e509072a790b1fbea3387f8749b5326beb822 100644 (file)
@@ -74,7 +74,7 @@ endif
 
 ifdef CONFIG_CC_STACKPROTECTOR
        cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
                 stackp-y := -fstack-protector
                 KBUILD_CFLAGS += $(stackp-y)
         else
@@ -96,8 +96,12 @@ cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_en
 # is .cfi_signal_frame supported too?
 cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
 cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+
+# does binutils support specific instructions?
+asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
+
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
index 030f4b93e255ac00c9cd646a733eda26733a5324..5df2869c874baced33de00a78a7b693f3237ea0f 100644 (file)
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
                if (arg[pos] == ',')
                        pos++;
 
-               if (!strncmp(arg, "ttyS", 4)) {
+               /*
+                * make sure we have
+                *      "serial,0x3f8,115200"
+                *      "serial,ttyS0,115200"
+                *      "ttyS0,115200"
+                */
+               if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
+                       port = simple_strtoull(arg + pos, &e, 16);
+                       if (port == 0 || arg + pos == e)
+                               port = DEFAULT_SERIAL_PORT;
+                       else
+                               pos = e - arg;
+               } else if (!strncmp(arg + pos, "ttyS", 4)) {
                        static const int bases[] = { 0x3f8, 0x2f8 };
                        int idx = 0;
 
index 0350311906ae731ca91e9ecedbc3e7acbaa668d0..2d93bdbc9ac026f2c0ef1e3fcd9c9a208b609787 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/ia32.h>
 
 #undef WARN_OLD
-#undef CORE_DUMP /* probably broken */
+#undef CORE_DUMP /* definitely broken */
 
 static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
 static int load_aout_library(struct file *);
@@ -131,21 +131,15 @@ static void set_brk(unsigned long start, unsigned long end)
  * macros to write out all the necessary info.
  */
 
-static int dump_write(struct file *file, const void *addr, int nr)
-{
-       return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
+#include <linux/coredump.h>
 
 #define DUMP_WRITE(addr, nr)                        \
        if (!dump_write(file, (void *)(addr), (nr))) \
                goto end_coredump;
 
-#define DUMP_SEEK(offset)                                              \
-       if (file->f_op->llseek) {                                       \
-               if (file->f_op->llseek(file, (offset), 0) != (offset))  \
-                       goto end_coredump;                              \
-       } else                                                          \
-               file->f_pos = (offset)
+#define DUMP_SEEK(offset)              \
+       if (!dump_seek(file, offset))   \
+               goto end_coredump;
 
 #define START_DATA()   (u.u_tsize << PAGE_SHIFT)
 #define START_STACK(u) (u.start_stack)
@@ -217,12 +211,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
                dump_size = dump.u_ssize << PAGE_SHIFT;
                DUMP_WRITE(dump_start, dump_size);
        }
-       /*
-        * Finally dump the task struct.  Not be used by gdb, but
-        * could be useful
-        */
-       set_fs(KERNEL_DS);
-       DUMP_WRITE(current, sizeof(*current));
 end_coredump:
        set_fs(fs);
        return has_dumped;
index b86feabed69bfe8e74f81f179c91fbe3a4b799d8..518bb99c339480820fc3995b1456d29704d67f07 100644 (file)
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %eax because syscall_trace_enter() returned
-        * the value it wants us to use in the table lookup.
+        * the %rax value we should see.  Instead, we just truncate that
+        * value to 32 bits again as we did on entry from user mode.
+        * If it's a new value set by user_regset during entry tracing,
+        * this matches the normal truncation of the user-mode value.
+        * If it's -1 to make us punt the syscall, then (u32)-1 is still
+        * an appropriately invalid value.
         */
        .macro LOAD_ARGS32 offset, _r9=0
        .if \_r9
@@ -60,6 +65,7 @@
        movl \offset+48(%rsp),%edx
        movl \offset+56(%rsp),%esi
        movl \offset+64(%rsp),%edi
+       movl %eax,%eax                  /* zero extension */
        .endm
        
        .macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
        testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz  sysenter_tracesys
-       cmpl    $(IA32_NR_syscalls-1),%eax
+       cmpq    $(IA32_NR_syscalls-1),%rax
        ja      ia32_badsys
 sysenter_do_call:
        IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
        movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
        call audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
        movl %ebx,%edi                  /* reload 1st syscall arg */
        movl RCX-ARGOFFSET(%rsp),%esi   /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
        call    syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpl    $(IA32_NR_syscalls-1),%eax
+       cmpq    $(IA32_NR_syscalls-1),%rax
        ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
        jmp     sysenter_do_call
        CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz   cstar_tracesys
-       cmpl $IA32_NR_syscalls-1,%eax
+       cmpq $IA32_NR_syscalls-1,%rax
        ja  ia32_badsys
 cstar_do_call:
        IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
        LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        xchgl %ebp,%r9d
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
        jmp cstar_do_call
 END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
        orl   $TS_COMPAT,TI_status(%r10)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        jnz ia32_tracesys
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
 ia32_do_call:
        IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
        jmp ia32_do_call
 END(ia32_syscall)
index bc6abb7bc7ee3084aa8b5d87ae19244715469990..76561d20ea2f27f0edfd0eee6d043b98c6aa6e90 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
+#include <linux/jump_label.h>
 #include <asm/asm.h>
 
 /*
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end NULL
 #endif
 
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#define IDEAL_NOP_SIZE_5 5
+extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+extern void arch_init_ideal_nop5(void);
+#else
+static inline void arch_init_ideal_nop5(void) {}
+#endif
+
 #endif /* _ASM_X86_ALTERNATIVE_H */
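
The ideal_nop5 buffer introduced here has the same length as a jmp rel32, which is what lets a jump-label or ftrace site flip between "fall through" and "branch out" in place. A hedged sketch of the byte layout (user-space C; the helper name is made up):

    #include <stdint.h>
    #include <string.h>

    /* Encode "jmp rel32" (0xe9 plus a 4-byte displacement) into a 5-byte
     * buffer; rel32 is relative to the end of the instruction. Writing
     * ideal_nop5 over the same 5 bytes disables the branch. */
    static void encode_jmp5(uint8_t buf[5], uintptr_t from, uintptr_t to)
    {
            int32_t rel = (int32_t)(to - (from + 5));

            buf[0] = 0xe9;
            memcpy(&buf[1], &rel, 4);
    }
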
index 5af2982133b5435b492372c447eef8c849b0eac4..f16a2caca1e0346ce62b47b1d7e0c866c8ddcfcf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
index d2544f1d705d3eb9cad310ac3486f94c2358b51d..916bc8111a01fa5de3a5df9f35ce3b8b60113342 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* !CONFIG_AMD_IOMMU_STATS */
 
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H  */
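
A usage sketch for the new helper: quirk code can gate RD890-specific handling on it instead of open-coding the vendor/device pair. The caller below is an assumption about usage, not code from this commit:

    static void __init check_rd890(struct amd_iommu *iommu)
    {
            if (!is_rd890_iommu(iommu->dev))
                    return;
            /* RD890-specific register stash/restore would go here;
             * see the stored_l1/stored_l2 fields added in this series */
    }
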
index 7014e88bc7798af33f681724ff90cf09eda52af5..e3509fc303bf5a54069e5b9b6b6edbc9ec390a49 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -368,6 +368,9 @@ struct amd_iommu {
        /* capabilities of that IOMMU read from ACPI */
        u32 cap;
 
+       /* flags read from acpi table */
+       u8 acpi_flags;
+
        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,24 @@ struct amd_iommu {
 
        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;
+
+       /*
+        * We can't rely on the BIOS to restore all values on reinit, so we
+        * need to stash them
+        */
+
+       /* The iommu BAR */
+       u32 stored_addr_lo;
+       u32 stored_addr_hi;
+
+       /*
+        * Each iommu has 6 l1s, each of which is documented as having 0x12
+        * registers
+        */
+       u32 stored_l1[6][0x12];
+
+       /* The l2 indirect registers */
+       u32 stored_l2[0x83];
 };
 
 /*
similarity index 61%
rename from arch/x86/include/asm/k8.h
rename to arch/x86/include/asm/amd_nb.h
index af00bd1d208934941f00ba7e911c20dd868a3f2b..c8517f81b21e73f9f2c428a26f2fb8995f73011f 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_K8_H
-#define _ASM_X86_K8_H
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
 
 #include <linux/pci.h>
 
@@ -7,24 +7,27 @@ extern struct pci_device_id k8_nb_ids[];
 struct bootnode;
 
 extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
 extern int cache_k8_northbridges(void);
 extern void k8_flush_garts(void);
 extern int k8_get_nodes(struct bootnode *nodes);
 extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int k8_scan_nodes(void);
 
-#ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
+struct k8_northbridge_info {
+       u16 num;
+       u8 gart_supported;
+       struct pci_dev **nb_misc;
+};
+extern struct k8_northbridge_info k8_northbridges;
+
+#ifdef CONFIG_AMD_NB
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
-       return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
+       return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
 }
 
 #else
-#define num_k8_northbridges 0
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
@@ -33,4 +36,4 @@ static inline struct pci_dev *node_to_k8_nb_misc(int node)
 #endif
 
 
-#endif /* _ASM_X86_K8_H */
+#endif /* _ASM_X86_AMD_NB_H */
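
Callers now go through one structure instead of the old pair of globals. A sketch of the resulting iteration pattern (the flush body is elided; gating on gart_supported is inferred from the new field):

    static void flush_all_garts(void)
    {
            int i;

            if (!k8_northbridges.gart_supported)
                    return;
            for (i = 0; i < k8_northbridges.num; i++) {
                    struct pci_dev *dev = k8_northbridges.nb_misc[i];
                    /* write the GART flush bit via dev's config space */
            }
    }
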
index a69b1ac9eaf82d639fd0ae51459d2dfdc79fbd1b..2fefa501d3ba64ee5db2e3541555a28ebe27598e 100644 (file)
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event;
 extern unsigned long apbt_quick_calibrate(void);
 extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);
-extern unsigned int boot_cpu_id;
 
 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
 extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
index 545776efeb164c72d523f2c78f2c501e1535344c..bafd80defa4328ed2f49e808daee7a624434345d 100644 (file)
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr % BITS_PER_LONG)) &
-               (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+               (addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
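
Besides tidying the expression in constant_test_bit() above, dropping the cast stops discarding the caller's const/volatile qualifiers. The arithmetic is unchanged: for nr = 70 on a 64-bit build, the word index is 70 / 64 = 1 and the bit is 70 % 64 = 6. A standalone restatement:

    #include <stdbool.h>

    static bool test_bit_demo(unsigned int nr, const unsigned long *addr)
    {
            /* same word/bit split as constant_test_bit() */
            return (addr[nr / (8 * sizeof(long))] >>
                    (nr % (8 * sizeof(long)))) & 1;
    }
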
index 306160e58b48772ccef1f65c803a3a3813ff5d1e..1d9cd27c2920a326e5ac2440ebdbd998d0b925c0 100644 (file)
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->sp - len;
index b185091bf19ce39f67a325ecd6aafb7c13f8cd23..4fab24de26b18404069994b908c79d3e4e481c50 100644 (file)
@@ -32,6 +32,5 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-extern unsigned int boot_cpu_id;
 
 #endif /* _ASM_X86_CPU_H */
index 781a50b29a4917545e71c3e74bdcbbda7faa383d..220e2ea08e80b3b2f40b33771913c4d80e2d815e 100644 (file)
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW       (6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS                (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5       (6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP                (6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT     (6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT                (6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP                (6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4       (6*32+16) /* 4 operands MAC instructions */
 #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM                (6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT    (6*32+22) /* topology extensions CPUID leafs */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_LBRV       (8*32+ 6) /* AMD LBR Virtualization support */
 #define X86_FEATURE_SVML       (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
 #define X86_FEATURE_NRIPS      (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR  (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN   (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
+
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE   (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@ -296,6 +308,7 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+#if __GNUC__ >= 4
 /*
 * Static testing of CPU features.  Used the same way as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
@@ -304,7 +317,7 @@ extern const char * const x86_power_flags[32];
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
@@ -345,7 +358,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 #endif
 }
 
-#if __GNUC__ >= 4
 #define static_cpu_has(bit)                                    \
 (                                                              \
        __builtin_constant_p(boot_cpu_has(bit)) ?               \
index 733f7e91e7a99f45435a6b46c831ff3e6e9c05da..326099199318c4ce1066a03a04220c08914ca890 100644 (file)
        CFI_ADJUST_CFA_OFFSET -8
        .endm
 
+       .macro pushfq_cfi
+       pushfq
+       CFI_ADJUST_CFA_OFFSET 8
+       .endm
+
+       .macro popfq_cfi
+       popfq
+       CFI_ADJUST_CFA_OFFSET -8
+       .endm
+
        .macro movq_cfi reg offset=0
        movq %\reg, \offset(%rsp)
        CFI_REL_OFFSET \reg, \offset
        CFI_ADJUST_CFA_OFFSET -4
        .endm
 
+       .macro pushfl_cfi
+       pushfl
+       CFI_ADJUST_CFA_OFFSET 4
+       .endm
+
+       .macro popfl_cfi
+       popfl
+       CFI_ADJUST_CFA_OFFSET -4
+       .endm
+
        .macro movl_cfi reg offset=0
        movl %\reg, \offset(%esp)
        CFI_REL_OFFSET \reg, \offset
index 8e8ec663a98fab4b771123fa1bfd6f7aef2f5931..b8e96a18676b872e751c4635921fb479f615c74e 100644 (file)
@@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_EVENTS
-BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
index d07b44f7d1dc014b3d1cb77e49138ef5f97f5d24..4d293dced62f4c178cd19e6cb2eae882678afb33 100644 (file)
@@ -214,5 +214,20 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
        BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
        return __virt_to_fix(vaddr);
 }
+
+/* Return a pointer with the offset into the page applied */
+static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
+                               phys_addr_t phys, pgprot_t flags)
+{
+       __set_fixmap(idx, phys, flags);
+       return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
+}
+
+#define set_fixmap_offset(idx, phys)                   \
+       __set_fixmap_offset(idx, phys, PAGE_KERNEL)
+
+#define set_fixmap_offset_nocache(idx, phys)                   \
+       __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
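
The new helper preserves the sub-page offset that a plain fix_to_virt() lookup would lose. A hedged example, with a made-up fixmap slot:

    /* phys = 0x12340040: page 0x12340000 plus offset 0x40 */
    void *p = (void *)set_fixmap_offset(FIX_EXAMPLE_IDX, 0x12340040UL);
    /* p == fix_to_virt(FIX_EXAMPLE_IDX) + 0x40 */
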
index 4ac5b0f33fc1017b3760eb01b1a4ed7f7155dbce..bf357f9b25f0a69ac70b3fd73ea943437fc43e9b 100644 (file)
@@ -17,6 +17,7 @@ extern int fix_aperture;
 #define GARTEN         (1<<0)
 #define DISGARTCPU     (1<<4)
 #define DISGARTIO      (1<<5)
+#define DISTLBWALKPRB  (1<<6)
 
 /* GART cache control register bits. */
 #define INVGART                (1<<0)
@@ -27,7 +28,6 @@ extern int fix_aperture;
 #define AMD64_GARTAPERTUREBASE 0x94
 #define AMD64_GARTTABLEBASE    0x98
 #define AMD64_GARTCACHECTL     0x9c
-#define AMD64_GARTEN           (1<<0)
 
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
@@ -57,6 +57,19 @@ static inline void gart_iommu_hole_init(void)
 
 extern int agp_amd64_init(void);
 
+static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
+{
+       u32 ctl;
+
+       /*
+        * Don't enable translation but enable GART IO and CPU accesses.
+        * Also, set DISTLBWALKPRB since GART tables memory is UC.
+        */
+       ctl = DISTLBWALKPRB | order << 1;
+
+       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
        u32 tmp, ctl;
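
In gart_set_size_and_enable() above, the order value occupies bits [3:1] of AMD64_GARTAPERTURECTL. Assuming the usual 32 MB << order aperture sizing, a 64 MB aperture (order 1) gives:

    u32 ctl = DISTLBWALKPRB | (1 << 1);  /* 0x40 | 0x02 = 0x42 */
    /* GARTEN (bit 0) stays clear, so translation remains disabled */
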
index aeab29aee617240fbf479d2945572879be4525ec..55e4de613f0ee72471fd3cfaf28dee21150b276c 100644 (file)
@@ -14,7 +14,7 @@ typedef struct {
 #endif
        unsigned int x86_platform_ipis; /* arch dependent */
        unsigned int apic_perf_irqs;
-       unsigned int apic_pending_irqs;
+       unsigned int apic_irq_work_irqs;
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
index 004e6e25e91301dd8a6d188d84a9c9ee795a7100..1d5c08a1bdfdb5b61c72cb286dfb0ac382c17e76 100644 (file)
@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
index 528a11e8d3e35f64fea90202d6f196d77d48e708..824ca07860d012cdcc9c46886500d056fdb2aeed 100644 (file)
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
 #include <linux/list.h>
 
 /* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X           0x00
+#define X86_BREAKPOINT_LEN_X           0x40
 #define X86_BREAKPOINT_LEN_1           0x40
 #define X86_BREAKPOINT_LEN_2           0x44
 #define X86_BREAKPOINT_LEN_4           0x4c
index 46c0fe05f230112b5aa5e176d4292e56526f8279..3a54a1ca1a0234c0b19897f423bcde64be1f9dce 100644 (file)
@@ -29,7 +29,7 @@
 extern void apic_timer_interrupt(void);
 extern void x86_platform_ipi(void);
 extern void error_interrupt(void);
-extern void perf_pending_interrupt(void);
+extern void irq_work_interrupt(void);
 
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
index a73a8d5a5e6963e6fef9b52c28666e913a74160c..4aa2bb3b242ab76733e0f7e5ba95454471297c1a 100644 (file)
@@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf);
 extern int restore_i387_xstate_ia32(void __user *buf);
 #endif
 
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
 #define X87_FSW_ES (1 << 7)    /* Exception Summary */
 
 static __always_inline __pure bool use_xsaveopt(void)
@@ -67,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
        return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+        return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -77,19 +88,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_X86_64
-
-/* Ignore delayed exceptions from user space */
-static inline void tolerant_fwait(void)
-{
-       asm volatile("1: fwait\n"
-                    "2:\n"
-                    _ASM_EXTABLE(1b, 2b));
-}
-
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
        int err;
 
+       /* See comment in fxsave() below. */
        asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
@@ -98,44 +101,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
-#if 0 /* See comment in fxsave() below. */
-                    : [fx] "r" (fx), "m" (*fx), "0" (0));
-#else
-                    : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
-#endif
+                    : [fx] "R" (fx), "m" (*fx), "0" (0));
        return err;
 }
 
-/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
-   is pending. Clear the x87 state here by setting it to fixed
-   values. The kernel data segment can be sometimes 0 and sometimes
-   new user value. Both should be ok.
-   Use the PDA as safe address because it should be already in L1. */
-static inline void fpu_clear(struct fpu *fpu)
-{
-       struct xsave_struct *xstate = &fpu->state->xsave;
-       struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
-       /*
-        * xsave header may indicate the init state of the FP.
-        */
-       if (use_xsave() &&
-           !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-               return;
-
-       if (unlikely(fx->swd & X87_FSW_ES))
-               asm volatile("fnclex");
-       alternative_input(ASM_NOP8 ASM_NOP2,
-                         "    emms\n"          /* clear stack tags */
-                         "    fildl %%gs:0",   /* load to clear state */
-                         X86_FEATURE_FXSAVE_LEAK);
-}
-
-static inline void clear_fpu_state(struct task_struct *tsk)
-{
-       fpu_clear(&tsk->thread.fpu);
-}
-
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
        int err;
@@ -149,6 +118,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
        if (unlikely(err))
                return -EFAULT;
 
+       /* See comment in fxsave() below. */
        asm volatile("1:  rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
@@ -157,11 +127,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
-#if 0 /* See comment in fxsave() below. */
-                    : [fx] "r" (fx), "0" (0));
-#else
-                    : [fx] "cdaSDb" (fx), "0" (0));
-#endif
+                    : [fx] "R" (fx), "0" (0));
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
@@ -175,56 +141,29 @@ static inline void fpu_fxsave(struct fpu *fpu)
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */
-#if 0
+
+#ifdef CONFIG_AS_FXSAVEQ
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (fpu->state->fxsave));
-#elif 0
+#else
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
-          needed for addressing (fix submitted to mainline 2005-11-21). */
-       __asm__ __volatile__("rex64/fxsave %0"
-                            : "=m" (fpu->state->fxsave));
-#else
-       /* This, however, we can work around by forcing the compiler to select
+          needed for addressing (fix submitted to mainline 2005-11-21).
+       asm volatile("rex64/fxsave %0"
+                    : "=m" (fpu->state->fxsave));
+          This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
-       __asm__ __volatile__("rex64/fxsave (%1)"
-                            : "=m" (fpu->state->fxsave)
-                            : "cdaSDb" (&fpu->state->fxsave));
+       asm volatile("rex64/fxsave (%[fx])"
+                    : "=m" (fpu->state->fxsave)
+                    : [fx] "R" (&fpu->state->fxsave));
 #endif
 }
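
The "R" constraint used above is GCC's x86 "legacy register" class (rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp), so the memory operand can never need a second REX prefix; it is what the old hand-written "cdaSDb" list was approximating. A minimal user-space illustration of the constraint:

    static inline unsigned long load_via_legacy_reg(void *p)
    {
            unsigned long v;

            /* "R" forces p into one of the eight legacy registers */
            asm("mov (%[ptr]), %[out]" : [out] "=r" (v) : [ptr] "R" (p));
            return v;
    }
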
 
-static inline void fpu_save_init(struct fpu *fpu)
-{
-       if (use_xsave())
-               fpu_xsave(fpu);
-       else
-               fpu_fxsave(fpu);
-
-       fpu_clear(fpu);
-}
-
-static inline void __save_init_fpu(struct task_struct *tsk)
-{
-       fpu_save_init(&tsk->thread.fpu);
-       task_thread_info(tsk)->status &= ~TS_USEDFPU;
-}
-
 #else  /* CONFIG_X86_32 */
 
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-static inline void tolerant_fwait(void)
-{
-       asm volatile("fnclex ; fwait");
-}
-
 /* perform fxrstor iff the processor has extended states, otherwise frstor */
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
@@ -241,6 +180,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
        return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+       asm volatile("fxsave %[fx]"
+                    : [fx] "=m" (fpu->state->fxsave));
+}
+
+#endif /* CONFIG_X86_64 */
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -256,47 +203,33 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
        if (use_xsave()) {
-               struct xsave_struct *xstate = &fpu->state->xsave;
-               struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
                fpu_xsave(fpu);
 
                /*
                 * xsave header may indicate the init state of the FP.
                 */
-               if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-                       goto end;
-
-               if (unlikely(fx->swd & X87_FSW_ES))
-                       asm volatile("fnclex");
-
-               /*
-                * we can do a simple return here or be paranoid :)
-                */
-               goto clear_state;
+               if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+                       return;
+       } else if (use_fxsr()) {
+               fpu_fxsave(fpu);
+       } else {
+               asm volatile("fsave %[fx]; fwait"
+                            : [fx] "=m" (fpu->state->fsave));
+               return;
        }
 
-       /* Use more nops than strictly needed in case the compiler
-          varies code */
-       alternative_input(
-               "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-               "fxsave %[fx]\n"
-               "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-               X86_FEATURE_FXSR,
-               [fx] "m" (fpu->state->fxsave),
-               [fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+               asm volatile("fnclex");
+
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending.  Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
-               GENERIC_NOP8 GENERIC_NOP2,
+               ASM_NOP8 ASM_NOP2,
                "emms\n\t"              /* clear stack tags */
-               "fildl %[addr]",        /* set F?P to defined value */
+               "fildl %P[addr]",       /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
-end:
-       ;
 }
 
 static inline void __save_init_fpu(struct task_struct *tsk)
@@ -305,9 +238,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
-
-#endif /* CONFIG_X86_64 */
-
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
        return fxrstor_checking(&fpu->state->fxsave);
@@ -344,7 +274,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk)
 static inline void __clear_fpu(struct task_struct *tsk)
 {
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
-               tolerant_fwait();
+               /* Ignore delayed exceptions from user space */
+               asm volatile("1: fwait\n"
+                            "2:\n"
+                            _ASM_EXTABLE(1b, 2b));
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
@@ -405,19 +338,6 @@ static inline void irq_ts_restore(int TS_state)
                stts();
 }
 
-#ifdef CONFIG_X86_64
-
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-       __save_init_fpu(tsk);
-       stts();
-}
-
-#define unlazy_fpu     __unlazy_fpu
-#define clear_fpu      __clear_fpu
-
-#else  /* CONFIG_X86_32 */
-
 /*
  * These disable preemption on their own and are safe
  */
@@ -443,8 +363,6 @@ static inline void clear_fpu(struct task_struct *tsk)
        preempt_enable();
 }
 
-#endif /* CONFIG_X86_64 */
-
 /*
  * i387 state interaction
  */
@@ -508,7 +426,4 @@ extern void fpu_finit(struct fpu *fpu);
 
 #endif /* __ASSEMBLY__ */
 
-#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
-#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
-
 #endif /* _ASM_X86_I387_H */
index 30a3e977612306033c9647487dde3298b31a4f73..6a45ec41ec26170e14277635a4ccbdeb5ebbbbf2 100644 (file)
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index f275e2244505b98308ca72e66e26c250d29c61a8..8d841505344e1009e6e7a7ff685ab72242a68e54 100644 (file)
@@ -3,4 +3,31 @@
 
 #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
+#ifdef CONFIG_INTR_REMAP
+static inline void prepare_irte(struct irte *irte, int vector,
+                               unsigned int dest)
+{
+       memset(irte, 0, sizeof(*irte));
+
+       irte->present = 1;
+       irte->dst_mode = apic->irq_dest_mode;
+       /*
+        * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
+        * actual level or edge trigger will be setup in the IO-APIC
+        * RTE. This will help simplify level triggered irq migration.
+        * For more details, see the comments (in io_apic.c) explaining
+        * IO-APIC irq migration in the presence of interrupt-remapping.
+        */
+       irte->trigger_mode = 0;
+       irte->dlvry_mode = apic->irq_delivery_mode;
+       irte->vector = vector;
+       irte->dest_id = IRTE_DEST(dest);
+       irte->redir_hint = 1;
+}
+#else
+static inline void prepare_irte(struct irte *irte, int vector, unsigned int dest)
+{
+}
+#endif
+
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
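
A hedged caller sketch: prepare_irte() fills in the entry and the existing remapping helpers install it. The vector and destination values shown are placeholders:

    struct irte irte;

    prepare_irte(&irte, cfg->vector, dest_apicid);
    modify_irte(irq, &irte);   /* existing interrupt-remapping helper */
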
index e2ca3009255706910d1ddc407232f4d09f011f24..6af0894dafb445cdaedceb1630249c47e955633f 100644 (file)
 #define X86_PLATFORM_IPI_VECTOR                0xed
 
 /*
- * Performance monitoring pending work vector:
+ * IRQ work vector:
  */
-#define LOCAL_PENDING_VECTOR           0xec
+#define IRQ_WORK_VECTOR                        0xec
 
 #define UV_BAU_MESSAGE                 0xea
 
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..f52d42e
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_JUMP_LABEL_H
+#define _ASM_X86_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/nops.h>
+
+#define JUMP_LABEL_NOP_SIZE 5
+
+# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+
+# define JUMP_LABEL(key, label)                                        \
+       do {                                                    \
+               asm goto("1:"                                   \
+                       JUMP_LABEL_INITIAL_NOP                  \
+                       ".pushsection __jump_table,  \"a\" \n\t"\
+                       _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
+                       ".popsection \n\t"                      \
+                       : :  "i" (key) :  : label);             \
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_X86_64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif
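
A hedged usage sketch of the macro, modeled on how tracepoints use it in this series; trace_key and trace_slow_path() are hypothetical names. The site is emitted as a 5-byte jmp-to-next (JUMP_LABEL_INITIAL_NOP) and later patched to ideal_nop5, or back to a real jump, as the key is toggled:

    static char trace_key;

    static inline void maybe_trace(void)
    {
            JUMP_LABEL(&trace_key, do_trace);
            return;
    do_trace:
            trace_slow_path();
    }
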
index 51cfd730ac5d145ed9f184f643c254d441dbeaab..1f99ecfc48e178312860f177160b735c2655946f 100644 (file)
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
 struct operand {
        enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
        unsigned int bytes;
-       unsigned long orig_val, *ptr;
+       union {
+               unsigned long orig_val;
+               u64 orig_val64;
+       };
+       unsigned long *ptr;
        union {
                unsigned long val;
+               u64 val64;
                char valptr[sizeof(unsigned long) + 2];
        };
 };
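
The anonymous unions let 64-bit emulation paths (e.g. cmpxchg8b on a 32-bit host) read and write the full operand without casting; val and val64 alias the same storage. A user-space restatement:

    #include <stdint.h>
    #include <stdio.h>

    struct operand_demo {
            union {
                    unsigned long val;
                    uint64_t val64;
            };
    };

    int main(void)
    {
            struct operand_demo op;

            op.val64 = 0x1122334455667788ULL;
            /* on a 32-bit build, val covers only the low word, which is
             * why 64-bit accesses must go through val64 explicitly */
            printf("val = %lx\n", op.val);
            return 0;
    }
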
index 502e53f999cf28a25cc2b00f1766bd1ffb2f9ed2..c52e2eb40a1e254339658481621be634b427a8f0 100644 (file)
@@ -652,20 +652,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
        return (struct kvm_mmu_page *)page_private(page);
 }
 
-static inline u16 kvm_read_fs(void)
-{
-       u16 seg;
-       asm("mov %%fs, %0" : "=g"(seg));
-       return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
-       u16 seg;
-       asm("mov %%gs, %0" : "=g"(seg));
-       return seg;
-}
-
 static inline u16 kvm_read_ldt(void)
 {
        u16 ldt;
@@ -673,16 +659,6 @@ static inline u16 kvm_read_ldt(void)
        return ldt;
 }
 
-static inline void kvm_load_fs(u16 sel)
-{
-       asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
-       asm("mov %0, %%gs" : : "rm"(sel));
-}
-
 static inline void kvm_load_ldt(u16 sel)
 {
        asm("lldt %0" : : "rm"(sel));
index 16350740edf600436d4990fec8a5fc9ce62abafc..4a711a684b174435bd5aae838515a836101eb389 100644 (file)
@@ -10,6 +10,9 @@
  */
 #ifndef _ASM_X86_MRST_H
 #define _ASM_X86_MRST_H
+
+#include <linux/sfi.h>
+
 extern int pci_mrst_init(void);
 int __init sfi_parse_mrtc(struct sfi_table_header *table);
 
@@ -26,7 +29,7 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
-static enum mrst_cpu_type mrst_identify_cpu(void)
+static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
        return __mrst_cpu_chip;
 }
@@ -42,4 +45,9 @@ extern enum mrst_timer_options mrst_timer_options;
 #define SFI_MTMR_MAX_NUM 8
 #define SFI_MRTC_MAX   8
 
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(void);
 #endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
new file mode 100644 (file)
index 0000000..bcdff99
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _ASM_X86_MWAIT_H
+#define _ASM_X86_MWAIT_H
+
+#define MWAIT_SUBSTATE_MASK            0xf
+#define MWAIT_CSTATE_MASK              0xf
+#define MWAIT_SUBSTATE_SIZE            4
+#define MWAIT_MAX_NUM_CSTATES          8
+
+#define CPUID_MWAIT_LEAF               5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+#define CPUID5_ECX_INTERRUPT_BREAK     0x2
+
+#define MWAIT_ECX_INTERRUPT_BREAK      0x1
+
+#endif /* _ASM_X86_MWAIT_H */
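
These constants describe the MWAIT hint layout: C-state in bits [7:4], sub-state in bits [3:0]. A worked packing example (the macro is a sketch, not part of this header):

    #define MWAIT_HINT(cstate, sub)                                     \
            ((((cstate) & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |  \
             ((sub) & MWAIT_SUBSTATE_MASK))

    /* MWAIT_HINT(3, 2) == (3 << 4) | 2 == 0x32 */
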
index 08fde475cb3b38fcf75e2c41ba3f14ced47dc8a9..2a8478140bb38b205e32624b7bda5118c43e8133 100644 (file)
@@ -21,10 +21,14 @@ extern void olpc_ofw_detect(void);
 /* install OFW's pde permanently into the kernel's pgtable */
 extern void setup_olpc_ofw_pgd(void);
 
+/* check if OFW was detected during boot */
+extern bool olpc_ofw_present(void);
+
 #else /* !CONFIG_OLPC_OPENFIRMWARE */
 
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
+static inline bool olpc_ofw_present(void) { return false; }
 
 #endif /* !CONFIG_OLPC_OPENFIRMWARE */
 
index a667f24c72549e6c8171892fc6fbf63ac54006d0..1df66211fd1b53d4d6233cc1e481e88009622aab 100644 (file)
@@ -8,7 +8,7 @@
 #define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK      (~(PAGE_SIZE-1))
 
-#define __PHYSICAL_MASK                ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __PHYSICAL_MASK                ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
 /* Cast PAGE_MASK to a signed type so that it is sign-extended if
index 5653f43d90e534670974735f5d4578afdb4fc900..edecb4ed22106866328a33b9d11676eff0ec823c 100644 (file)
@@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
-                                           unsigned long start, unsigned long count)
-{
-       PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
-}
 static inline void paravirt_release_pmd(unsigned long pfn)
 {
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
index db9ef55323417812190eafd454cff45e136aec29..b82bac975250e53ac53363c2a04b724a32ea9929 100644 (file)
@@ -255,7 +255,6 @@ struct pv_mmu_ops {
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
-       void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
index def500776b16a3b63d34da569021722e4d82f18a..a70cd216be5d729db1f364340f911d632819f18d 100644 (file)
 #define P4_ESCR_EMASK(v)       ((v) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_TAG(v)         ((v) << P4_ESCR_TAG_SHIFT)
 
-/* Non HT mask */
-#define P4_ESCR_MASK                   \
-       (P4_ESCR_EVENT_MASK     |       \
-       P4_ESCR_EVENTMASK_MASK  |       \
-       P4_ESCR_TAG_MASK        |       \
-       P4_ESCR_TAG_ENABLE      |       \
-       P4_ESCR_T0_OS           |       \
-       P4_ESCR_T0_USR)
-
-/* HT mask */
-#define P4_ESCR_MASK_HT                        \
-       (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
-
 #define P4_CCCR_OVF                    0x80000000U
 #define P4_CCCR_CASCADE                        0x40000000U
 #define P4_CCCR_OVF_PMI_T0             0x04000000U
 #define P4_CCCR_THRESHOLD(v)           ((v) << P4_CCCR_THRESHOLD_SHIFT)
 #define P4_CCCR_ESEL(v)                        ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
 
-/* Non HT mask */
-#define P4_CCCR_MASK                           \
-       (P4_CCCR_OVF                    |       \
-       P4_CCCR_CASCADE                 |       \
-       P4_CCCR_OVF_PMI_T0              |       \
-       P4_CCCR_FORCE_OVF               |       \
-       P4_CCCR_EDGE                    |       \
-       P4_CCCR_THRESHOLD_MASK          |       \
-       P4_CCCR_COMPLEMENT              |       \
-       P4_CCCR_COMPARE                 |       \
-       P4_CCCR_ESCR_SELECT_MASK        |       \
-       P4_CCCR_ENABLE)
-
-/* HT mask */
-#define P4_CCCR_MASK_HT                                \
-       (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
-
 #define P4_GEN_ESCR_EMASK(class, name, bit)    \
        class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_EMASK_BIT(class, name)         class##__##name
 #define P4_CONFIG_HT_SHIFT             63
 #define P4_CONFIG_HT                   (1ULL << P4_CONFIG_HT_SHIFT)
 
+/*
+ * The bits we allow to pass for RAW events
+ */
+#define P4_CONFIG_MASK_ESCR            \
+       P4_ESCR_EVENT_MASK      |       \
+       P4_ESCR_EVENTMASK_MASK  |       \
+       P4_ESCR_TAG_MASK        |       \
+       P4_ESCR_TAG_ENABLE
+
+#define P4_CONFIG_MASK_CCCR            \
+       P4_CCCR_EDGE            |       \
+       P4_CCCR_THRESHOLD_MASK  |       \
+       P4_CCCR_COMPLEMENT      |       \
+       P4_CCCR_COMPARE         |       \
+       P4_CCCR_THREAD_ANY      |       \
+       P4_CCCR_RESERVED
+
+/* some dangerous bits are reserved for kernel internals */
+#define P4_CONFIG_MASK                                   \
+       (p4_config_pack_escr(P4_CONFIG_MASK_ESCR))      | \
+       (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+
 static inline bool p4_is_event_cascaded(u64 config)
 {
        u32 cccr = p4_config_unpack_cccr(config);
index a34c785c5a63b88ecd9fb3f07ee26c8fd3b4a7df..ada823a13c7c9460a06e330e00de2456cab9e66f 100644 (file)
@@ -28,6 +28,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
 
+extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
@@ -603,6 +605,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
        pte_update(mm, addr, ptep);
 }
 
+#define flush_tlb_fix_spurious_fault(vma, address)
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
index 076052cd62bef2d08af47a725ad3b187cd735be6..f96ac9bedf75db0ca247326ab580301832761d82 100644 (file)
@@ -102,6 +102,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
        native_set_pgd(pgd, native_make_pgd(0));
 }
 
+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
index 325b7bdbebaa9494b3e3e4c08a314aed72f88dc4..cae9c3cb95cf160e4e00f8c0b1c29fac298bb48f 100644 (file)
@@ -110,6 +110,8 @@ struct cpuinfo_x86 {
        u16                     phys_proc_id;
        /* Core id: */
        u16                     cpu_core_id;
+       /* Compute unit id */
+       u8                      compute_unit_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
 #endif
@@ -602,7 +604,7 @@ extern unsigned long                mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
-       unsigned cr4;
+       unsigned long cr4;
 
        mmu_cr4_features |= mask;
        cr4 = read_cr4();
@@ -612,7 +614,7 @@ static inline void set_in_cr4(unsigned long mask)
 
 static inline void clear_in_cr4(unsigned long mask)
 {
-       unsigned cr4;
+       unsigned long cr4;
 
        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
@@ -764,29 +766,6 @@ extern unsigned long               idle_halt;
 extern unsigned long           idle_nomwait;
 extern bool                    c1e_detected;
 
-/*
- * on systems with caches, caches must be flashed as the absolute
- * last instruction before going into a suspended halt.  Otherwise,
- * dirty data can linger in the cache and become stale on resume,
- * leading to strange errors.
- *
- * perform a variety of operations to guarantee that the compiler
- * will not reorder instructions.  wbinvd itself is serializing
- * so the processor will not reorder.
- *
- * Systems without cache can just go into halt.
- */
-static inline void wbinvd_halt(void)
-{
-       mb();
-       /* check for clflush to determine if wbinvd is legal */
-       if (cpu_has_clflush)
-               asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
-       else
-               while (1)
-                       halt();
-}
-
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
index ef292c792d742cb35b1a48ac7f18f97a6546860c..d6763b139a844243b9fbb8dc620e633fe7b5825a 100644 (file)
@@ -93,6 +93,11 @@ void *extend_brk(size_t size, size_t align);
                        : : "i" (sz));                                  \
        }
 
+/* Helper for reserving space for arrays of things */
+#define RESERVE_BRK_ARRAY(type, name, entries)         \
+       type *name;                                     \
+       RESERVE_BRK(name, sizeof(type) * entries)
+
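
Usage sketch: the macro declares the pointer and reserves matching brk space in one line; early-boot code is then expected to point it at memory obtained via extend_brk(). The names below are illustrative:

    /* file scope: declares "unsigned long *early_table" and reserves
     * room for 16 entries in the brk area */
    RESERVE_BRK_ARRAY(unsigned long, early_table, 16);
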
 #ifdef __i386__
 
 void __init i386_start_kernel(void);
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
deleted file mode 100644 (file)
index 61e08c0..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * VMI interface definition
- *
- * Copyright (C) 2005, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Maintained by: Zachary Amsden zach@vmware.com
- *
- */
-#include <linux/types.h>
-
-/*
- *---------------------------------------------------------------------
- *
- *  VMI Option ROM API
- *
- *---------------------------------------------------------------------
- */
-#define VMI_SIGNATURE 0x696d5663   /* "cVmi" */
-
-#define PCI_VENDOR_ID_VMWARE            0x15AD
-#define PCI_DEVICE_ID_VMWARE_VMI        0x0801
-
-/*
- * We use two version numbers for compatibility, with the major
- * number signifying interface breakages, and the minor number
- * interface extensions.
- */
-#define VMI_API_REV_MAJOR       3
-#define VMI_API_REV_MINOR       0
-
-#define VMI_CALL_CPUID                 0
-#define VMI_CALL_WRMSR                 1
-#define VMI_CALL_RDMSR                 2
-#define VMI_CALL_SetGDT                        3
-#define VMI_CALL_SetLDT                        4
-#define VMI_CALL_SetIDT                        5
-#define VMI_CALL_SetTR                 6
-#define VMI_CALL_GetGDT                        7
-#define VMI_CALL_GetLDT                        8
-#define VMI_CALL_GetIDT                        9
-#define VMI_CALL_GetTR                 10
-#define VMI_CALL_WriteGDTEntry         11
-#define VMI_CALL_WriteLDTEntry         12
-#define VMI_CALL_WriteIDTEntry         13
-#define VMI_CALL_UpdateKernelStack     14
-#define VMI_CALL_SetCR0                        15
-#define VMI_CALL_SetCR2                        16
-#define VMI_CALL_SetCR3                        17
-#define VMI_CALL_SetCR4                        18
-#define VMI_CALL_GetCR0                        19
-#define VMI_CALL_GetCR2                        20
-#define VMI_CALL_GetCR3                        21
-#define VMI_CALL_GetCR4                        22
-#define VMI_CALL_WBINVD                        23
-#define VMI_CALL_SetDR                 24
-#define VMI_CALL_GetDR                 25
-#define VMI_CALL_RDPMC                 26
-#define VMI_CALL_RDTSC                 27
-#define VMI_CALL_CLTS                  28
-#define VMI_CALL_EnableInterrupts      29
-#define VMI_CALL_DisableInterrupts     30
-#define VMI_CALL_GetInterruptMask      31
-#define VMI_CALL_SetInterruptMask      32
-#define VMI_CALL_IRET                  33
-#define VMI_CALL_SYSEXIT               34
-#define VMI_CALL_Halt                  35
-#define VMI_CALL_Reboot                        36
-#define VMI_CALL_Shutdown              37
-#define VMI_CALL_SetPxE                        38
-#define VMI_CALL_SetPxELong            39
-#define VMI_CALL_UpdatePxE             40
-#define VMI_CALL_UpdatePxELong         41
-#define VMI_CALL_MachineToPhysical     42
-#define VMI_CALL_PhysicalToMachine     43
-#define VMI_CALL_AllocatePage          44
-#define VMI_CALL_ReleasePage           45
-#define VMI_CALL_InvalPage             46
-#define VMI_CALL_FlushTLB              47
-#define VMI_CALL_SetLinearMapping      48
-
-#define VMI_CALL_SetIOPLMask           61
-#define VMI_CALL_SetInitialAPState     62
-#define VMI_CALL_APICWrite             63
-#define VMI_CALL_APICRead              64
-#define VMI_CALL_IODelay               65
-#define VMI_CALL_SetLazyMode           73
-
-/*
- *---------------------------------------------------------------------
- *
- * MMU operation flags
- *
- *---------------------------------------------------------------------
- */
-
-/* Flags used by VMI_{Allocate|Release}Page call */
-#define VMI_PAGE_PAE             0x10  /* Allocate PAE shadow */
-#define VMI_PAGE_CLONE           0x20  /* Clone from another shadow */
-#define VMI_PAGE_ZEROED          0x40  /* Page is pre-zeroed */
-
-
-/* Flags shared by Allocate|Release Page and PTE updates */
-#define VMI_PAGE_PT              0x01
-#define VMI_PAGE_PD              0x02
-#define VMI_PAGE_PDP             0x04
-#define VMI_PAGE_PML4            0x08
-
-#define VMI_PAGE_NORMAL          0x00 /* for debugging */
-
-/* Flags used by PTE updates */
-#define VMI_PAGE_CURRENT_AS      0x10 /* implies VMI_PAGE_VA_MASK is valid */
-#define VMI_PAGE_DEFER           0x20 /* may queue update until TLB inval */
-#define VMI_PAGE_VA_MASK         0xfffff000
-
-#ifdef CONFIG_X86_PAE
-#define VMI_PAGE_L1            (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
-#define VMI_PAGE_L2            (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
-#else
-#define VMI_PAGE_L1            (VMI_PAGE_PT | VMI_PAGE_ZEROED)
-#define VMI_PAGE_L2            (VMI_PAGE_PD | VMI_PAGE_ZEROED)
-#endif
-
-/* Flags used by VMI_FlushTLB call */
-#define VMI_FLUSH_TLB            0x01
-#define VMI_FLUSH_GLOBAL         0x02
-
-/*
- *---------------------------------------------------------------------
- *
- *  VMI relocation definitions for ROM call get_reloc
- *
- *---------------------------------------------------------------------
- */
-
-/* VMI Relocation types */
-#define VMI_RELOCATION_NONE     0
-#define VMI_RELOCATION_CALL_REL 1
-#define VMI_RELOCATION_JUMP_REL 2
-#define VMI_RELOCATION_NOP     3
-
-#ifndef __ASSEMBLY__
-struct vmi_relocation_info {
-       unsigned char           *eip;
-       unsigned char           type;
-       unsigned char           reserved[3];
-};
-#endif
-
-
-/*
- *---------------------------------------------------------------------
- *
- *  Generic ROM structures and definitions
- *
- *---------------------------------------------------------------------
- */
-
-#ifndef __ASSEMBLY__
-
-struct vrom_header {
-       u16     rom_signature;  /* option ROM signature */
-       u8      rom_length;     /* ROM length in 512 byte chunks */
-       u8      rom_entry[4];   /* 16-bit code entry point */
-       u8      rom_pad0;       /* 4-byte align pad */
-       u32     vrom_signature; /* VROM identification signature */
-       u8      api_version_min;/* Minor version of API */
-       u8      api_version_maj;/* Major version of API */
-       u8      jump_slots;     /* Number of jump slots */
-       u8      reserved1;      /* Reserved for expansion */
-       u32     virtual_top;    /* Hypervisor virtual address start */
-       u16     reserved2;      /* Reserved for expansion */
-       u16     license_offs;   /* Offset to License string */
-       u16     pci_header_offs;/* Offset to PCI OPROM header */
-       u16     pnp_header_offs;/* Offset to PnP OPROM header */
-       u32     rom_pad3;       /* PnP reserverd / VMI reserved */
-       u8      reserved[96];   /* Reserved for headers */
-       char    vmi_init[8];    /* VMI_Init jump point */
-       char    get_reloc[8];   /* VMI_GetRelocationInfo jump point */
-} __attribute__((packed));
-
-struct pnp_header {
-       char sig[4];
-       char rev;
-       char size;
-       short next;
-       short res;
-       long devID;
-       unsigned short manufacturer_offset;
-       unsigned short product_offset;
-} __attribute__((packed));
-
-struct pci_header {
-       char sig[4];
-       short vendorID;
-       short deviceID;
-       short vpdData;
-       short size;
-       char rev;
-       char class;
-       char subclass;
-       char interface;
-       short chunks;
-       char rom_version_min;
-       char rom_version_maj;
-       char codetype;
-       char lastRom;
-       short reserved;
-} __attribute__((packed));
-
-/* Function prototypes for bootstrapping */
-#ifdef CONFIG_VMI
-extern void vmi_init(void);
-extern void vmi_activate(void);
-extern void vmi_bringup(void);
-#else
-static inline void vmi_init(void) {}
-static inline void vmi_activate(void) {}
-static inline void vmi_bringup(void) {}
-#endif
-
-/* State needed to start an application processor in an SMP system. */
-struct vmi_ap_state {
-       u32 cr0;
-       u32 cr2;
-       u32 cr3;
-       u32 cr4;
-
-       u64 efer;
-
-       u32 eip;
-       u32 eflags;
-       u32 eax;
-       u32 ebx;
-       u32 ecx;
-       u32 edx;
-       u32 esp;
-       u32 ebp;
-       u32 esi;
-       u32 edi;
-       u16 cs;
-       u16 ss;
-       u16 ds;
-       u16 es;
-       u16 fs;
-       u16 gs;
-       u16 ldtr;
-
-       u16 gdtr_limit;
-       u32 gdtr_base;
-       u32 idtr_base;
-       u16 idtr_limit;
-};
-
-#endif
diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
deleted file mode 100644 (file)
index c6e0bee..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * VMI Time wrappers
- *
- * Copyright (C) 2006, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to dhecht@vmware.com
- *
- */
-
-#ifndef _ASM_X86_VMI_TIME_H
-#define _ASM_X86_VMI_TIME_H
-
-/*
- * Raw VMI call indices for timer functions
- */
-#define VMI_CALL_GetCycleFrequency     66
-#define VMI_CALL_GetCycleCounter       67
-#define VMI_CALL_SetAlarm              68
-#define VMI_CALL_CancelAlarm           69
-#define VMI_CALL_GetWallclockTime      70
-#define VMI_CALL_WallclockUpdated      71
-
-/* Cached VMI timer operations */
-extern struct vmi_timer_ops {
-       u64 (*get_cycle_frequency)(void);
-       u64 (*get_cycle_counter)(int);
-       u64 (*get_wallclock)(void);
-       int (*wallclock_updated)(void);
-       void (*set_alarm)(u32 flags, u64 expiry, u64 period);
-       void (*cancel_alarm)(u32 flags);
-} vmi_timer_ops;
-
-/* Prototypes */
-extern void __init vmi_time_init(void);
-extern unsigned long vmi_get_wallclock(void);
-extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_sched_clock(void);
-extern unsigned long vmi_tsc_khz(void);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-extern void __devinit vmi_time_bsp_init(void);
-extern void __devinit vmi_time_ap_init(void);
-#endif
-
-/*
- * When run under a hypervisor, a vcpu is always in one of three states:
- * running, halted, or ready.  The vcpu is in the 'running' state if it
- * is executing.  When the vcpu executes the halt interface, the vcpu
- * enters the 'halted' state and remains halted until there is some work
- * pending for the vcpu (e.g. an alarm expires, host I/O completes on
- * behalf of virtual I/O).  At this point, the vcpu enters the 'ready'
- * state (waiting for the hypervisor to reschedule it).  Finally, at any
- * time when the vcpu is not in the 'running' state nor the 'halted'
- * state, it is in the 'ready' state.
- *
- * Real time is advances while the vcpu is 'running', 'ready', or
- * 'halted'.  Stolen time is the time in which the vcpu is in the
- * 'ready' state.  Available time is the remaining time -- the vcpu is
- * either 'running' or 'halted'.
- *
- * All three views of time are accessible through the VMI cycle
- * counters.
- */
-
-/* The cycle counters. */
-#define VMI_CYCLES_REAL         0
-#define VMI_CYCLES_AVAILABLE    1
-#define VMI_CYCLES_STOLEN       2
-
-/* The alarm interface 'flags' bits */
-#define VMI_ALARM_COUNTERS      2
-
-#define VMI_ALARM_COUNTER_MASK  0x000000ff
-
-#define VMI_ALARM_WIRED_IRQ0    0x00000000
-#define VMI_ALARM_WIRED_LVTT    0x00010000
-
-#define VMI_ALARM_IS_ONESHOT    0x00000000
-#define VMI_ALARM_IS_PERIODIC   0x00000100
-
-#define CONFIG_VMI_ALARM_HZ    100
-
-#endif /* _ASM_X86_VMI_TIME_H */
index 0925676266bdbc9cbcf03811916b9f8b9b9e627e..80a93dc99076c3732a620b2f51a5560369ff3cb0 100644 (file)
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_pvclock.o = -pg
+CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
@@ -32,7 +34,8 @@ GCOV_PROFILE_paravirt.o               := n
 obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time.o ioport.o ldt.o dumpstack.o
-obj-y                  += setup.o x86_init.o i8259.o irqinit.o
+obj-y                  += setup.o x86_init.o i8259.o irqinit.o jump_label.o
+obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
 obj-$(CONFIG_X86_32)   += probe_roms_32.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
@@ -83,15 +86,15 @@ obj-$(CONFIG_DOUBLEFAULT)   += doublefault_32.o
 obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_VM86)             += vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_MRST)        += early_printk_mrst.o
 
 obj-$(CONFIG_HPET_TIMER)       += hpet.o
 obj-$(CONFIG_APB_TIMER)                += apb_timer.o
 
-obj-$(CONFIG_K8_NB)            += k8.o
+obj-$(CONFIG_AMD_NB)           += amd_nb.o
 obj-$(CONFIG_DEBUG_RODATA_TEST)        += test_rodata.o
 obj-$(CONFIG_DEBUG_NX_TEST)    += test_nx.o
 
-obj-$(CONFIG_VMI)              += vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)                += kvm.o
 obj-$(CONFIG_KVM_CLOCK)                += kvmclock.o
 obj-$(CONFIG_PARAVIRT)         += paravirt.o paravirt_patch_$(BITS).o
@@ -104,6 +107,7 @@ obj-$(CONFIG_SCx200)                += scx200.o
 scx200-y                       += scx200_32.o
 
 obj-$(CONFIG_OLPC)             += olpc.o
+obj-$(CONFIG_OLPC_XO1)         += olpc-xo1.o
 obj-$(CONFIG_OLPC_OPENFIRMWARE)        += olpc_ofw.o
 obj-$(CONFIG_X86_MRST)         += mrst.o
 
@@ -120,7 +124,6 @@ obj-$(CONFIG_SWIOTLB)                       += pci-swiotlb.o
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
        obj-$(CONFIG_X86_UV)            += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
-       obj-$(CONFIG_X86_PM_TIMER)      += pmtimer_64.o
        obj-$(CONFIG_AUDIT)             += audit_64.o
 
        obj-$(CONFIG_GART_IOMMU)        += pci-gart_64.o aperture_64.o
index fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f..5812404a0d4ce5e3eb815675f591f18b95762329 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <acpi/processor.h>
 #include <asm/acpi.h>
+#include <asm/mwait.h>
 
 /*
  * Initialize bm_flags based on the CPU cache properties
@@ -61,20 +62,10 @@ struct cstate_entry {
                unsigned int ecx;
        } states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry *cpu_cstate_entry;  /* per CPU ptr */
+static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
-#define MWAIT_SUBSTATE_MASK    (0xf)
-#define MWAIT_CSTATE_MASK      (0xf)
-#define MWAIT_SUBSTATE_SIZE    (4)
-
-#define CPUID_MWAIT_LEAF (5)
-#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
-#define CPUID5_ECX_INTERRUPT_BREAK     (0x2)
-
-#define MWAIT_ECX_INTERRUPT_BREAK      (0x1)
-
 #define NATIVE_CSTATE_BEYOND_HALT      (2)
 
 static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
index f65ab8b014c4f42421d77200243c39c7fd252165..a36bb90aef5383d68bcf4af0b0c33749d572163a 100644 (file)
@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
-static void *text_poke_early(void *addr, const void *opcode, size_t len);
+void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
    This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@ void __init alternative_instructions(void)
  * instructions. And on the local CPU you need to be protected against NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                              size_t len)
 {
        unsigned long flags;
@@ -637,7 +637,72 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
        tpp.len = len;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
-       stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+       /* Use __stop_machine() because the caller has already called get_online_cpus(). */
+       __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
        return addr;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+
+void __init arch_init_ideal_nop5(void)
+{
+       extern const unsigned char ftrace_test_p6nop[];
+       extern const unsigned char ftrace_test_nop5[];
+       extern const unsigned char ftrace_test_jmp[];
+       int faulted = 0;
+
+       /*
+        * There is no good nop for all x86 archs.
+        * We will default to using the P6_NOP5, but first we
+        * will test to make sure that the nop will actually
+        * work on this CPU. If it faults, we will then
+        * fall back to a less efficient 5-byte nop. If that fails,
+        * we just use a jmp as our nop. This isn't the most
+        * efficient nop, but we cannot use a multi-part nop,
+        * since we would then risk being preempted in the middle
+        * of that nop; if tracing were enabled at that moment, it
+        * might cause a system crash.
+        *
+        * TODO: check the cpuid to determine the best nop.
+        */
+       asm volatile (
+               "ftrace_test_jmp:"
+               "jmp ftrace_test_p6nop\n"
+               "nop\n"
+               "nop\n"
+               "nop\n"  /* 2 byte jmp + 3 bytes */
+               "ftrace_test_p6nop:"
+               P6_NOP5
+               "jmp 1f\n"
+               "ftrace_test_nop5:"
+               ".byte 0x66,0x66,0x66,0x66,0x90\n"
+               "1:"
+               ".section .fixup, \"ax\"\n"
+               "2:     movl $1, %0\n"
+               "       jmp ftrace_test_nop5\n"
+               "3:     movl $2, %0\n"
+               "       jmp 1b\n"
+               ".previous\n"
+               _ASM_EXTABLE(ftrace_test_p6nop, 2b)
+               _ASM_EXTABLE(ftrace_test_nop5, 3b)
+               : "=r"(faulted) : "0" (faulted));
+
+       switch (faulted) {
+       case 0:
+               pr_info("converting mcount calls to 0f 1f 44 00 00\n");
+               memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
+               break;
+       case 1:
+               pr_info("converting mcount calls to 66 66 66 66 90\n");
+               memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
+               break;
+       case 2:
+               pr_info("converting mcount calls to jmp . + 5\n");
+               memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
+               break;
+       }
+
+}
+#endif
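
Once arch_init_ideal_nop5() has chosen a nop, a user such as the jump-label
code overwrites a five-byte call or jmp site with it. A hedged userspace
model of that patching step, assuming the P6 nop was the one selected; in
the kernel the copy would go through text_poke_early()/text_poke_smp(), not
a plain memcpy():

#include <stdio.h>
#include <string.h>

#define IDEAL_NOP_SIZE_5 5

/* Assume the P6 nop won the probe above: 0f 1f 44 00 00. */
static const unsigned char ideal_nop5[IDEAL_NOP_SIZE_5] = {
        0x0f, 0x1f, 0x44, 0x00, 0x00
};

int main(void)
{
        /* e8 xx xx xx xx: a relative call, as -pg would emit for mcount */
        unsigned char site[IDEAL_NOP_SIZE_5] = { 0xe8, 0x12, 0x34, 0x56, 0x78 };

        memcpy(site, ideal_nop5, IDEAL_NOP_SIZE_5); /* disable the call */

        for (int i = 0; i < IDEAL_NOP_SIZE_5; i++)
                printf("%02x ", site[i]);
        printf("\n");
        return 0;
}
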
index fa044e1e30a2ed081175480dccec352a7e381392..d2fdb0826df25654fa94cc7ee307c23e235c5c91 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
                           size_t size,
                           int dir)
 {
+       dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;
 
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
            (dma_addr + size > dma_dom->aperture_size))
                return;
 
+       flush_addr = dma_addr;
        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-               iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+               iommu_flush_pages(&dma_dom->domain, flush_addr, size);
                dma_dom->need_flush = false;
        }
 }
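
The fix above matters because iommu_flush_pages() needs the original,
possibly unaligned address: once dma_addr has been rounded down with
PAGE_MASK, a flush of [dma_addr, dma_addr + size) can stop short of the
mapping's real end. A small userspace model, assuming 4 KiB pages and
made-up values:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Pages spanned by an (address, size) pair, offset included. */
static unsigned long num_pages(unsigned long addr, unsigned long size)
{
        return ((addr & ~PAGE_MASK) + size + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        unsigned long dma_addr = 0x1fff;        /* unaligned mapping */
        unsigned long size     = 2;             /* spans two pages   */

        printf("pages spanned: %lu\n", num_pages(dma_addr, size));
        printf("flush from %#lx, not %#lx\n",
               dma_addr, dma_addr & PAGE_MASK);
        return 0;
}
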
index 3cc63e2b8dd4c4acc4ee7f77c3ad432d97beb169..3cb482e123de75f9044e0f843d23fb4348e513a7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -194,6 +194,39 @@ static inline unsigned long tbl_size(int entry_size)
        return 1UL << shift;
 }
 
+/* Access to l1 and l2 indexed register spaces */
+
+static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
+{
+       u32 val;
+
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+       pci_read_config_dword(iommu->dev, 0xfc, &val);
+       return val;
+}
+
+static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
+{
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
+       pci_write_config_dword(iommu->dev, 0xfc, val);
+       pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+}
+
+static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
+{
+       u32 val;
+
+       pci_write_config_dword(iommu->dev, 0xf0, address);
+       pci_read_config_dword(iommu->dev, 0xf4, &val);
+       return val;
+}
+
+static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+{
+       pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
+       pci_write_config_dword(iommu->dev, 0xf4, val);
+}
+
 /****************************************************************************
  *
  * AMD IOMMU MMIO register space handling functions
@@ -619,6 +652,7 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc;
+       int i, j;
 
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
@@ -632,6 +666,30 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+       if (!is_rd890_iommu(iommu->dev))
+               return;
+
+       /*
+        * Some rd890 systems may not be fully reconfigured by the BIOS, so
+        * we save this information here so that it can be reprogrammed on
+        * resume.
+        */
+
+       pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                             &iommu->stored_addr_lo);
+       pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+                             &iommu->stored_addr_hi);
+
+       /* Low bit locks writes to configuration space */
+       iommu->stored_addr_lo &= ~1;
+
+       for (i = 0; i < 6; i++)
+               for (j = 0; j < 0x12; j++)
+                       iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+       for (i = 0; i < 0x83; i++)
+               iommu->stored_l2[i] = iommu_read_l2(iommu, i);
 }
 
 /*
@@ -649,29 +707,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        struct ivhd_entry *e;
 
        /*
-        * First set the recommended feature enable bits from ACPI
-        * into the IOMMU control registers
-        */
-       h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-       h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-       h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-       h->flags & IVHD_FLAG_ISOC_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-       /*
-        * make IOMMU memory accesses cache coherent
+        * First save the recommended feature enable bits from ACPI
         */
-       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+       iommu->acpi_flags = h->flags;
 
        /*
         * Done. Now parse the device entries
@@ -1116,6 +1154,79 @@ static void init_device_table(void)
        }
 }
 
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+       /*
+        * make IOMMU memory accesses cache coherent
+        */
+       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+{
+       int i, j;
+       u32 ioc_feature_control;
+       struct pci_dev *pdev = NULL;
+
+       /* RD890 BIOSes may not have completely reconfigured the iommu */
+       if (!is_rd890_iommu(iommu->dev))
+               return;
+
+       /*
+        * First, we need to ensure that the iommu is enabled. This is
+        * controlled by a register in the northbridge
+        */
+       pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
+
+       if (!pdev)
+               return;
+
+       /* Select Northbridge indirect register 0x75 and enable writing */
+       pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+       pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
+
+       /* Enable the iommu */
+       if (!(ioc_feature_control & 0x1))
+               pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+       pci_dev_put(pdev);
+
+       /* Restore the iommu BAR */
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                              iommu->stored_addr_lo);
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
+                              iommu->stored_addr_hi);
+
+       /* Restore the l1 indirect regs for each of the 6 l1s */
+       for (i = 0; i < 6; i++)
+               for (j = 0; j < 0x12; j++)
+                       iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
+
+       /* Restore the l2 indirect regs */
+       for (i = 0; i < 0x83; i++)
+               iommu_write_l2(iommu, i, iommu->stored_l2[i]);
+
+       /* Lock PCI setup registers */
+       pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                              iommu->stored_addr_lo | 1);
+}
+
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
@@ -1126,6 +1237,7 @@ static void enable_iommus(void)
 
        for_each_iommu(iommu) {
                iommu_disable(iommu);
+               iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
@@ -1150,6 +1262,11 @@ static void disable_iommus(void)
 
 static int amd_iommu_resume(struct sys_device *dev)
 {
+       struct amd_iommu *iommu;
+
+       for_each_iommu(iommu)
+               iommu_apply_resume_quirks(iommu);
+
        /* re-load the hardware */
        enable_iommus();
 
similarity index 66%
rename from arch/x86/kernel/k8.c
rename to arch/x86/kernel/amd_nb.c
index 0f7bc20cfcdedd58f66b6b036bfb7d824e599e37..8f6463d8ed0de1ebfece6cb1138a15697f657197 100644 (file)
@@ -8,21 +8,19 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <asm/k8.h>
-
-int num_k8_northbridges;
-EXPORT_SYMBOL(num_k8_northbridges);
+#include <asm/amd_nb.h>
 
 static u32 *flush_words;
 
 struct pci_device_id k8_nb_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
        {}
 };
 EXPORT_SYMBOL(k8_nb_ids);
 
-struct pci_dev **k8_northbridges;
+struct k8_northbridge_info k8_northbridges;
 EXPORT_SYMBOL(k8_northbridges);
 
 static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
@@ -40,36 +38,45 @@ int cache_k8_northbridges(void)
        int i;
        struct pci_dev *dev;
 
-       if (num_k8_northbridges)
+       if (k8_northbridges.num)
                return 0;
 
        dev = NULL;
        while ((dev = next_k8_northbridge(dev)) != NULL)
-               num_k8_northbridges++;
+               k8_northbridges.num++;
+
+       /* some CPU families (e.g. family 0x11) do not support GART */
+       if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+           boot_cpu_data.x86 == 0x15)
+               k8_northbridges.gart_supported = 1;
 
-       k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
-                                 GFP_KERNEL);
-       if (!k8_northbridges)
+       k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+                                         sizeof(void *), GFP_KERNEL);
+       if (!k8_northbridges.nb_misc)
                return -ENOMEM;
 
-       if (!num_k8_northbridges) {
-               k8_northbridges[0] = NULL;
+       if (!k8_northbridges.num) {
+               k8_northbridges.nb_misc[0] = NULL;
                return 0;
        }
 
-       flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
-       if (!flush_words) {
-               kfree(k8_northbridges);
-               return -ENOMEM;
+       if (k8_northbridges.gart_supported) {
+               flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+                                     GFP_KERNEL);
+               if (!flush_words) {
+                       kfree(k8_northbridges.nb_misc);
+                       return -ENOMEM;
+               }
        }
 
        dev = NULL;
        i = 0;
        while ((dev = next_k8_northbridge(dev)) != NULL) {
-               k8_northbridges[i] = dev;
-               pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
+               k8_northbridges.nb_misc[i] = dev;
+               if (k8_northbridges.gart_supported)
+                       pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
        }
-       k8_northbridges[i] = NULL;
+       k8_northbridges.nb_misc[i] = NULL;
        return 0;
 }
 EXPORT_SYMBOL_GPL(cache_k8_northbridges);
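
After this rename, callers no longer index a bare pci_dev array; they go
through the new k8_northbridge_info and must check gart_supported first. A
userspace mock of the consumer pattern (the struct layout mirrors the diff,
the device data is fabricated):

#include <stdio.h>

struct pci_dev { int id; };

struct k8_northbridge_info {
        int num;
        int gart_supported;
        struct pci_dev **nb_misc;              /* NULL-terminated */
};

/* Mock devices standing in for what cache_k8_northbridges() finds. */
static struct pci_dev nb0 = { 0 }, nb1 = { 1 };
static struct pci_dev *misc[] = { &nb0, &nb1, NULL };
static struct k8_northbridge_info k8_northbridges = { 2, 1, misc };

int main(void)
{
        if (!k8_northbridges.gart_supported)
                return 0;       /* e.g. family 0x11: no GART at all */

        for (int i = 0; i < k8_northbridges.num; i++)
                printf("northbridge %d present\n",
                       k8_northbridges.nb_misc[i]->id);
        return 0;
}
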
@@ -93,22 +100,25 @@ void k8_flush_garts(void)
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);
 
+       if (!k8_northbridges.gart_supported)
+               return;
+
        /* Avoid races between AGP and IOMMU. In theory it's not needed
           but I'm not sure if the hardware won't lose flush requests
           when another is pending. This whole thing is so expensive anyway
           that it doesn't matter to serialize more. -AK */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
-       for (i = 0; i < num_k8_northbridges; i++) {
-               pci_write_config_dword(k8_northbridges[i], 0x9c,
+       for (i = 0; i < k8_northbridges.num; i++) {
+               pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
                                       flush_words[i]|1);
                flushed++;
        }
-       for (i = 0; i < num_k8_northbridges; i++) {
+       for (i = 0; i < k8_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
-                       pci_read_config_dword(k8_northbridges[i],
+                       pci_read_config_dword(k8_northbridges.nb_misc[i],
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
index 8dd77800ff5d7b444742ae02cc5168ccf8855fcd..6fe2b5cb4f3ce1ec0721f6698075764e9a93a91b 100644 (file)
@@ -343,7 +343,7 @@ void apbt_setup_secondary_clock(void)
 
        /* Don't register boot CPU clockevent */
        cpu = smp_processor_id();
-       if (cpu == boot_cpu_id)
+       if (!cpu)
                return;
        /*
         * We need to calculate the scaled math multiplication factor for
@@ -398,7 +398,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
                }
                break;
        default:
-               pr_debug(KERN_INFO "APBT notified %lu, no action\n", action);
+               pr_debug("APBT notified %lu, no action\n", action);
        }
        return NOTIFY_OK;
 }
@@ -552,7 +552,7 @@ bad_count:
                pr_debug("APB CS going back %lx:%lx:%lx ",
                         t2, last_read, t2 - last_read);
 bad_count_x3:
-               pr_debug(KERN_INFO "tripple check enforced\n");
+               pr_debug("triple check enforced\n");
                t0 = apbt_readl(phy_cs_timer_id,
                                APBTMR_N_CURRENT_VALUE);
                udelay(1);
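
The two hunks above fix the same misuse: pr_debug() already supplies its own
log level, so a KERN_INFO inside the format string just becomes literal text
in the message. A userspace model of the expansion, writing the level
prefixes out as the "<n>" strings they expanded to in kernels of this
vintage:

#include <stdio.h>

#define KERN_INFO  "<6>"
#define KERN_DEBUG "<7>"
#define pr_debug(fmt, ...) printf(KERN_DEBUG fmt, ##__VA_ARGS__)

int main(void)
{
        pr_debug(KERN_INFO "oops, double level\n"); /* "<7><6>oops..."    */
        pr_debug("correct: single level\n");        /* "<7>correct..."    */
        return 0;
}
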
index a2e0caf26e172c8f7b18231e4f4a072161ed66fe..377f5db3b8b4092d4926000703c3433231fe86e0 100644 (file)
@@ -27,7 +27,7 @@
 #include <asm/gart.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
@@ -307,7 +307,7 @@ void __init early_gart_iommu_check(void)
                                continue;
 
                        ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
-                       aper_enabled = ctl & AMD64_GARTEN;
+                       aper_enabled = ctl & GARTEN;
                        aper_order = (ctl >> 1) & 7;
                        aper_size = (32 * 1024 * 1024) << aper_order;
                        aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
@@ -362,7 +362,7 @@ void __init early_gart_iommu_check(void)
                                continue;
 
                        ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
-                       ctl &= ~AMD64_GARTEN;
+                       ctl &= ~GARTEN;
                        write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
                }
        }
@@ -505,8 +505,13 @@ out:
 
        /* Fix up the north bridges */
        for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
-               int bus;
-               int dev_base, dev_limit;
+               int bus, dev_base, dev_limit;
+
+               /*
+                * Don't enable translation yet but enable GART IO and CPU
+                * accesses and set DISTLBWALKPRB since GART table memory is UC.
+                */
+               u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
                bus = bus_dev_ranges[i].bus;
                dev_base = bus_dev_ranges[i].dev_base;
@@ -515,10 +520,7 @@ out:
                        if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
                                continue;
 
-                       /* Don't enable translation yet. That is done later.
-                          Assume this BIOS didn't initialise the GART so
-                          just overwrite all previous bits */
-                       write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1);
+                       write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
                        write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25);
                }
        }
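
For reference, the aperture-control word written above packs the aperture
order into bits 3:1, leaving GARTEN (bit 0) clear until translation is
enabled later; the aperture size is 32 MB shifted left by the order. A small
decoding sketch; the DISTLBWALKPRB bit position is an assumption here, and
the sample ctl value is made up:

#include <stdio.h>

#define GARTEN        (1u << 0)
#define DISTLBWALKPRB (1u << 6)   /* assumed bit position for this sketch */

int main(void)
{
        unsigned int ctl = 0x4;                   /* order = 2 */
        unsigned int aper_order = (ctl >> 1) & 7;
        unsigned long aper_size = (32UL * 1024 * 1024) << aper_order;

        printf("enabled=%u order=%u size=%lu MB\n",
               !!(ctl & GARTEN), aper_order, aper_size >> 20);
        return 0;
}
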
index e3b534cda49a8097dde55400083d7eeb8f9c694c..8cf86fb3b4e38a122f6bfd480e3faaf08466aaa9 100644 (file)
@@ -1665,10 +1665,7 @@ int __init APIC_init_uniprocessor(void)
        }
 #endif
 
-#ifndef CONFIG_SMP
-       enable_IR_x2apic();
        default_setup_apic_routing();
-#endif
 
        verify_local_APIC();
        connect_bsp_APIC();
index f1efebaf55105fa835ac7938c1654295fdd81562..9508811e8448b2d147386758605a6bc214839926 100644 (file)
@@ -162,7 +162,7 @@ int __init arch_early_irq_init(void)
 
        cfg = irq_cfgx;
        count = ARRAY_SIZE(irq_cfgx);
-       node= cpu_to_node(boot_cpu_id);
+       node = cpu_to_node(0);
 
        for (i = 0; i < count; i++) {
                desc = irq_to_desc(i);
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
 
        old_cfg = old_desc->chip_data;
 
-       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+       cfg->vector = old_cfg->vector;
+       cfg->move_in_progress = old_cfg->move_in_progress;
+       cpumask_copy(cfg->domain, old_cfg->domain);
+       cpumask_copy(cfg->old_domain, old_cfg->old_domain);
 
        init_copy_irq_2_pin(old_cfg, cfg, node);
 }
 
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
 {
-       kfree(old_cfg);
+       free_cpumask_var(cfg->domain);
+       free_cpumask_var(cfg->old_domain);
+       kfree(cfg);
 }
 
 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -1377,21 +1382,7 @@ int setup_ioapic_entry(int apic_id, int irq,
                if (index < 0)
                        panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
 
-               memset(&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = apic->irq_dest_mode;
-               /*
-                * Trigger mode in the IRTE will always be edge, and the
-                * actual level or edge trigger will be setup in the IO-APIC
-                * RTE. This will help simplify level triggered irq migration.
-                * For more details, see the comments above explaining IO-APIC
-                * irq migration in the presence of interrupt-remapping.
-                */
-               irte.trigger_mode = 0;
-               irte.dlvry_mode = apic->irq_delivery_mode;
-               irte.vector = vector;
-               irte.dest_id = IRTE_DEST(destination);
+               prepare_irte(&irte, vector, destination);
 
                /* Set source-id of interrupt request */
                set_ioapic_sid(&irte, apic_id);
@@ -1483,7 +1474,7 @@ static void __init setup_IO_APIC_irqs(void)
        int notcon = 0;
        struct irq_desc *desc;
        struct irq_cfg *cfg;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1548,7 +1539,7 @@ static void __init setup_IO_APIC_irqs(void)
 void setup_IO_APIC_irq_extra(u32 gsi)
 {
        int apic_id = 0, pin, idx, irq;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        struct irq_desc *desc;
        struct irq_cfg *cfg;
 
@@ -2927,7 +2918,7 @@ static inline void __init check_timer(void)
 {
        struct irq_desc *desc = irq_to_desc(0);
        struct irq_cfg *cfg = desc->chip_data;
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
        int no_pin1 = 0;
@@ -3281,7 +3272,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 
 int create_irq(void)
 {
-       int node = cpu_to_node(boot_cpu_id);
+       int node = cpu_to_node(0);
        unsigned int irq_want;
        int irq;
 
@@ -3335,14 +3326,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
                ir_index = map_irq_to_irte_handle(irq, &sub_handle);
                BUG_ON(ir_index == -1);
 
-               memset (&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = apic->irq_dest_mode;
-               irte.trigger_mode = 0; /* edge */
-               irte.dlvry_mode = apic->irq_delivery_mode;
-               irte.vector = cfg->vector;
-               irte.dest_id = IRTE_DEST(dest);
+               prepare_irte(&irte, cfg->vector, dest);
 
                /* Set source-id of interrupt request */
                if (pdev)
@@ -3903,7 +3887,7 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
        if (dev)
                node = dev_to_node(dev);
        else
-               node = cpu_to_node(boot_cpu_id);
+               node = cpu_to_node(0);
 
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
index 83e9be4778e2b597791306a85ca9ca527003e2c2..f9e4e6a54073e3d901d0475da9c5deceb21d55d8 100644 (file)
@@ -54,6 +54,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
  */
 void __init default_setup_apic_routing(void)
 {
+
+       enable_IR_x2apic();
+
 #ifdef CONFIG_X86_X2APIC
        if (x2apic_mode
 #ifdef CONFIG_X86_UV
index 7b598b84c902e62f9f852d78c896acb03d4ba318..f744f54cb248e7ac0cd6defaa84a8e673468b30a 100644 (file)
@@ -698,9 +698,11 @@ void __init uv_system_init(void)
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
-                       uv_blade_info[blade].pnode = (i * 64 + j);
+                       pnode = (i * 64 + j);
+                       uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
+                       max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
-               max_pnode = max(pnode, max_pnode);
        }
 
        /* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
-               max_pnode = max(pnode, max_pnode);
        }
 
        map_gru_high(max_pnode);
index ba5f62f45f01e136e849894076917684ffcd8c40..9e093f8fe78c4713aec4f91a62394eb0efa1f6c0 100644 (file)
@@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
-       if (c->cpu_index == boot_cpu_id)
+       if (!c->cpu_index)
                return;
 
        /*
@@ -253,37 +253,51 @@ static int __cpuinit nearby_node(int apicid)
 #endif
 
 /*
- * Fixup core topology information for AMD multi-node processors.
- * Assumption: Number of cores in each internal node is the same.
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+ *     Assumption: Number of cores in each internal node is the same.
+ * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
+static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-       unsigned long long value;
-       u32 nodes, cores_per_node;
+       u32 nodes;
+       u8 node_id;
        int cpu = smp_processor_id();
 
-       if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
-               return;
+       /* get information required for multi-node processors */
+       if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+               u32 eax, ebx, ecx, edx;
 
-       /* fixup topology information only once for a core */
-       if (cpu_has(c, X86_FEATURE_AMD_DCM))
-               return;
+               cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+               nodes = ((ecx >> 8) & 7) + 1;
+               node_id = ecx & 7;
 
-       rdmsrl(MSR_FAM10H_NODE_ID, value);
+               /* get compute unit information */
+               smp_num_siblings = ((ebx >> 8) & 3) + 1;
+               c->compute_unit_id = ebx & 0xff;
+       } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+               u64 value;
 
-       nodes = ((value >> 3) & 7) + 1;
-       if (nodes == 1)
+               rdmsrl(MSR_FAM10H_NODE_ID, value);
+               nodes = ((value >> 3) & 7) + 1;
+               node_id = value & 7;
+       } else
                return;
 
-       set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-       cores_per_node = c->x86_max_cores / nodes;
+       /* fixup multi-node processor information */
+       if (nodes > 1) {
+               u32 cores_per_node;
+
+               set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+               cores_per_node = c->x86_max_cores / nodes;
 
-       /* store NodeID, use llc_shared_map to store sibling info */
-       per_cpu(cpu_llc_id, cpu) = value & 7;
+               /* store NodeID, use llc_shared_map to store sibling info */
+               per_cpu(cpu_llc_id, cpu) = node_id;
 
-       /* fixup core id to be in range from 0 to (cores_per_node - 1) */
-       c->cpu_core_id = c->cpu_core_id % cores_per_node;
+               /* core id to be in range from 0 to (cores_per_node - 1) */
+               c->cpu_core_id = c->cpu_core_id % cores_per_node;
+       }
 }
 #endif
 
@@ -304,9 +318,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
-       /* fixup topology information on multi-node processors */
-       if ((c->x86 == 0x10) && (c->x86_model == 9))
-               amd_fixup_dcm(c);
+       amd_get_topology(c);
 #endif
 }
 
@@ -412,6 +424,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
 #endif
+
+       /* We need to do the following only once */
+       if (c != &boot_cpu_data)
+               return;
+
+       if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+
+               if (c->x86 > 0x10 ||
+                   (c->x86 == 0x10 && c->x86_model >= 0x2)) {
+                       u64 val;
+
+                       rdmsrl(MSR_K7_HWCR, val);
+                       if (!(val & BIT(24)))
+                               printk(KERN_WARNING FW_BUG "TSC doesn't count "
+                                       "with P0 frequency!\n");
+               }
+       }
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -523,7 +552,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #endif
 
        if (c->extended_cpuid_level >= 0x80000006) {
-               if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
+               if (cpuid_edx(0x80000006) & 0xf000)
                        num_cache_leaves = 4;
                else
                        num_cache_leaves = 3;
index 490dac63c2d21e90ab3af70c543b012f1c3f43b5..4b68bda30938d0a55ed39eeaeff68157266a9ea0 100644 (file)
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
        u32 ebx;
@@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_early_init(c);
 
 #ifdef CONFIG_SMP
-       c->cpu_index = boot_cpu_id;
+       c->cpu_index = 0;
 #endif
        filter_cpuid_features(c, false);
 }
@@ -704,16 +704,21 @@ void __init early_cpu_init(void)
 }
 
 /*
- * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6; unfortunately, that's not true in practice because
- * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect.  In the latter case it doesn't even *fail*
- * reliably, so probing for it doesn't even work.  Disable it completely
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
  * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
        clear_cpu_cap(c, X86_FEATURE_NOPL);
+#else
+       set_cpu_cap(c, X86_FEATURE_NOPL);
+#endif
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -1264,13 +1269,6 @@ void __cpuinit cpu_init(void)
        clear_all_debug_regs();
        dbg_restore_debug_regs();
 
-       /*
-        * Force FPU initialization:
-        */
-       current_thread_info()->status = 0;
-       clear_used_math();
-       mxcsr_feature_mask_init();
-
        fpu_init();
        xsave_init();
 }
index 3624e8a0f71bf72e4c3cd86abcedfbb1c53d9b4d..e765633f210ed893f56bc23ba26e6cfea6c074fd 100644 (file)
@@ -32,6 +32,8 @@ struct cpu_dev {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 
 #endif
index 994230d4dc4e545986a8c982629589905f0f224d..4f6f679f27990198640f9a1e139ea3a838b05eb8 100644 (file)
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
                return -ENODEV;
 
        out_obj = output.pointer;
-       if (out_obj->type != ACPI_TYPE_BUFFER)
-               return -ENODEV;
+       if (out_obj->type != ACPI_TYPE_BUFFER) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-       if (errors)
-               return -ENODEV;
+       if (errors) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        supported = *((u32 *)(out_obj->buffer.pointer + 4));
-       if (!(supported & 0x1))
-               return -ENODEV;
+       if (!(supported & 0x1)) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
 out_free:
        kfree(output.pointer);
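
The rewrite above is the standard goto-cleanup shape: every failure path
after the ACPI buffer has been allocated must funnel through out_free so
output.pointer is not leaked. A minimal userspace sketch of the same
pattern, with a hypothetical parse() standing in for pcc_cpufreq_do_osc():

#include <stdlib.h>

/* parse() is a made-up stand-in, not the driver's function. */
static int parse(const char *src)
{
        char *buf = malloc(64);
        int ret = 0;

        if (!buf)
                return -1;

        if (!src || !src[0]) {          /* any later validation failure */
                ret = -1;
                goto out_free;          /* must still free buf          */
        }

        /* ... use buf ... */

out_free:
        free(buf);
        return ret;
}

int main(void)
{
        return parse("") == -1 ? 0 : 1; /* expect the error path */
}
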
index 85f69cdeae1020a18e1c9097c8da276a8e50b992..695f17731e2382ce63bbbb4a9caaf2ec7e16e29e 100644 (file)
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
+                       get_cpu_cap(c);
                }
        }
 
@@ -169,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
-       if (c->cpu_index == boot_cpu_id)
+       if (!c->cpu_index)
                return;
 
        /*
index 898c2f4eab88b28cb74719b943cad01ec06efa29..12cd823c8d038008f8abe77785b4bb475dd3cf20 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <asm/processor.h>
 #include <linux/smp.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/smp.h>
 
 #define LVL_1_INST     1
@@ -306,7 +306,7 @@ struct _cache_attr {
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
 };
 
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
 
 /*
  * L3 cache descriptors
@@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
                        return;
 
        /* not in virtualized environments */
-       if (num_k8_northbridges == 0)
+       if (k8_northbridges.num == 0)
                return;
 
        /*
@@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
         * never freed but this is done only on shutdown so it doesn't matter.
         */
        if (!l3_caches) {
-               int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+               int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
 
                l3_caches = kzalloc(size, GFP_ATOMIC);
                if (!l3_caches)
@@ -556,12 +556,12 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                show_cache_disable_1, store_cache_disable_1);
 
-#else  /* CONFIG_CPU_SUP_AMD */
+#else  /* CONFIG_AMD_NB */
 static void __cpuinit
 amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
-#endif /* CONFIG_CPU_SUP_AMD */
+#endif /* CONFIG_AMD_NB */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1000,7 +1000,7 @@ static struct attribute *default_attrs[] = {
 
 static struct attribute *default_l3_attrs[] = {
        DEFAULT_SYSFS_CACHE_ATTRS,
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
        &cache_disable_0.attr,
        &cache_disable_1.attr,
 #endif
index 224392d8fe8c095390439a10ea45af2e21bc0930..39aaee5c1ab23f742dd749500ca09834e0d39bc4 100644 (file)
@@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
+
                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;
@@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                        if (rdmsr_safe(address, &low, &high))
                                break;
 
-                       if (!(high & MASK_VALID_HI)) {
-                               if (block)
-                                       continue;
-                               else
-                                       break;
-                       }
+                       if (!(high & MASK_VALID_HI))
+                               continue;
 
                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
@@ -530,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +540,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
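
The alloc-to-zalloc switch in this hunk pairs with the cpumask change below
it: once the code sets a single CPU bit instead of copying a whole mask in,
the allocation must start out zeroed or the remaining bits are heap garbage.
A userspace model with calloc() in the zalloc_cpumask_var() role:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* calloc() plays the zalloc role: storage comes back zeroed. */
        unsigned long *mask = calloc(1, sizeof(*mask));

        if (!mask)
                return 1;

        *mask |= 1UL << 3;              /* cpumask_set_cpu(3, mask)    */
        printf("mask = %#lx\n", *mask); /* 0x8: only bit 3, no garbage */

        free(mask);
        return 0;
}
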
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..4b683267eca5fb982111375ab9e000d7eda3b437 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -215,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
                err = sysfs_add_file_to_group(&sys_dev->kobj,
                                              &attr_core_power_limit_count.attr,
                                              thermal_attr_group.name);
-       if (cpu_has(c, X86_FEATURE_PTS))
+       if (cpu_has(c, X86_FEATURE_PTS)) {
                err = sysfs_add_file_to_group(&sys_dev->kobj,
                                              &attr_package_throttle_count.attr,
                                              thermal_attr_group.name);
@@ -223,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
                        err = sysfs_add_file_to_group(&sys_dev->kobj,
                                        &attr_package_power_limit_count.attr,
                                        thermal_attr_group.name);
+       }
 
        return err;
 }
@@ -251,7 +253,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +289,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
@@ -348,7 +350,7 @@ static void intel_thermal_interrupt(void)
 
 static void unexpected_thermal_interrupt(void)
 {
-       printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
+       printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
                        smp_processor_id());
        add_taint(TAINT_MACHINE_CHECK);
 }
index c5f59d07142562e9c673572970ae79fca1e3622e..ac140c7be396b6f55c97521ddcd572df664e95b6 100644 (file)
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return 0;
-       if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+       if (boot_cpu_data.x86 < 0xf)
                return 0;
        /* In case some hypervisor doesn't pass SYSCFG through: */
        if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
index 7d28d7d03885a1d28e0785221f8e0197b89f957f..9f27228ceffd4e90da9467498e409e451187ee99 100644 (file)
@@ -64,18 +64,59 @@ static inline void k8_check_syscfg_dram_mod_en(void)
        }
 }
 
+/* Get the size of contiguous MTRR range */
+static u64 get_mtrr_size(u64 mask)
+{
+       u64 size;
+
+       mask >>= PAGE_SHIFT;
+       mask |= size_or_mask;
+       size = -mask;
+       size <<= PAGE_SHIFT;
+       return size;
+}
+
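
The body of get_mtrr_size() is a two's-complement trick: with the bits above
the physical address width filled in (what size_or_mask contributes),
negating a contiguous MTRR mask yields the region size directly. A worked
userspace example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Mask selecting a 64 KiB range, high bits already set. */
        uint64_t mask = 0xFFFFFFFFFFFF0000ULL;
        uint64_t size = -mask;          /* == 0x10000 */

        printf("size = %llu KiB\n", (unsigned long long)(size >> 10));
        return 0;
}
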
 /*
- * Returns the effective MTRR type for the region
- * Error returns:
- * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
- * - 0xFF - when MTRR is not enabled
+ * Check and return the effective type for MTRR-MTRR type overlap.
+ * Returns 1 if the effective type is UNCACHEABLE, else returns 0
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+static int check_type_overlap(u8 *prev, u8 *curr)
+{
+       if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
+               *prev = MTRR_TYPE_UNCACHABLE;
+               *curr = MTRR_TYPE_UNCACHABLE;
+               return 1;
+       }
+
+       if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
+           (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
+               *prev = MTRR_TYPE_WRTHROUGH;
+               *curr = MTRR_TYPE_WRTHROUGH;
+       }
+
+       if (*prev != *curr) {
+               *prev = MTRR_TYPE_UNCACHABLE;
+               *curr = MTRR_TYPE_UNCACHABLE;
+               return 1;
+       }
+
+       return 0;
+}
+
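
check_type_overlap() encodes the architectural combining rules: UC wins over
everything, WB and WT combine to WT, and any other mismatch degrades to UC.
A standalone model of the same decision table; the type values mirror the
MTRR_TYPE_* constants:

#include <stdio.h>

enum { UNCACHABLE = 0, WRTHROUGH = 4, WRBACK = 6 };

static int effective(int a, int b)
{
        if (a == UNCACHABLE || b == UNCACHABLE)
                return UNCACHABLE;
        if ((a == WRBACK && b == WRTHROUGH) ||
            (a == WRTHROUGH && b == WRBACK))
                return WRTHROUGH;
        return (a == b) ? a : UNCACHABLE;
}

int main(void)
{
        printf("WB+WT -> %d (WT expected)\n", effective(WRBACK, WRTHROUGH));
        printf("WB+UC -> %d (UC expected)\n", effective(WRBACK, UNCACHABLE));
        return 0;
}
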
+/*
+ * Error/Semi-error returns:
+ * 0xFF - when MTRR is not enabled
+ * *repeat == 1 implies that [start:end] spanned across an MTRR range and
+ *             the type returned corresponds only to [start:*partial_end].
+ *             The caller has to look up again for [*partial_end:end].
+ */
+static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 {
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;
 
+       *repeat = 0;
        if (!mtrr_state_set)
                return 0xFF;
 
@@ -126,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 
                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
-               if (start_state != end_state)
-                       return 0xFE;
+
+               if (start_state != end_state) {
+                       /*
+                        * We have start:end spanning across an MTRR.
+                        * We split the region into
+                        * either
+                        * (start:mtrr_end) (mtrr_end:end)
+                        * or
+                        * (start:mtrr_start) (mtrr_start:end)
+                        * depending on kind of overlap.
+                        * Return the type for the first region and a pointer
+                        * to the start of the second region so that the
+                        * caller will look up the second region again.
+                        * Note: This way we handle multiple overlaps as well.
+                        */
+                       if (start_state)
+                               *partial_end = base + get_mtrr_size(mask);
+                       else
+                               *partial_end = base;
+
+                       if (unlikely(*partial_end <= start)) {
+                               WARN_ON(1);
+                               *partial_end = start + PAGE_SIZE;
+                       }
+
+                       end = *partial_end - 1; /* end is inclusive */
+                       *repeat = 1;
+               }
 
                if ((start & mask) != (base & mask))
                        continue;
@@ -138,21 +205,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
                        continue;
                }
 
-               if (prev_match == MTRR_TYPE_UNCACHABLE ||
-                   curr_match == MTRR_TYPE_UNCACHABLE) {
-                       return MTRR_TYPE_UNCACHABLE;
-               }
-
-               if ((prev_match == MTRR_TYPE_WRBACK &&
-                    curr_match == MTRR_TYPE_WRTHROUGH) ||
-                   (prev_match == MTRR_TYPE_WRTHROUGH &&
-                    curr_match == MTRR_TYPE_WRBACK)) {
-                       prev_match = MTRR_TYPE_WRTHROUGH;
-                       curr_match = MTRR_TYPE_WRTHROUGH;
-               }
-
-               if (prev_match != curr_match)
-                       return MTRR_TYPE_UNCACHABLE;
+               if (check_type_overlap(&prev_match, &curr_match))
+                       return curr_match;
        }
 
        if (mtrr_tom2) {
@@ -166,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end)
        return mtrr_state.def_type;
 }
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error return:
+ * 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+       u8 type, prev_type;
+       int repeat;
+       u64 partial_end;
+
+       type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+       /*
+        * Common path is with repeat = 0.
+        * However, we can have cases where [start:end] spans across some
+        * MTRR range. Do repeated lookups for that case here.
+        */
+       while (repeat) {
+               prev_type = type;
+               start = partial_end;
+               type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+               if (check_type_overlap(&prev_type, &type))
+                       return type;
+       }
+
+       return type;
+}
+
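
To see the repeat machinery in action, consider a query that straddles an
MTRR edge: the first pass types [start:partial_end) and sets *repeat, then
the loop types the remainder and merges the results. A toy userspace model
with one fabricated boundary, WB below 64K and UC above:

#include <stdio.h>
#include <stdint.h>

enum { UC = 0, WB = 6 };
#define BOUNDARY 0x10000ULL     /* fabricated MTRR edge */

static int lookup(uint64_t start, uint64_t end,
                  uint64_t *partial_end, int *repeat)
{
        *repeat = 0;
        if (start < BOUNDARY && end >= BOUNDARY) {
                *partial_end = BOUNDARY;        /* split at the MTRR edge */
                *repeat = 1;
        }
        return start < BOUNDARY ? WB : UC;
}

int main(void)
{
        uint64_t partial_end, start = 0x9000, end = 0x10fff;
        int repeat;
        int type = lookup(start, end, &partial_end, &repeat);

        while (repeat) {
                int next = lookup(partial_end, end, &partial_end, &repeat);
                type = (type == next) ? type : UC;   /* simplified merge */
        }
        printf("effective type = %d (UC expected)\n", type);
        return 0;
}
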
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..fe73c1844a9a5b7c9a0c902bef0b9f993207acee 100644 (file)
@@ -102,6 +102,7 @@ struct cpu_hw_events {
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;
 
        int                     n_events;
@@ -530,7 +531,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
        int err;
 
@@ -583,7 +584,7 @@ static void x86_pmu_disable_all(void)
        }
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -618,7 +619,7 @@ static void x86_pmu_enable_all(int added)
        }
 }
 
-static const struct pmu pmu;
+static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
@@ -800,10 +801,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
                hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-void hw_perf_enable(void)
+static void x86_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
@@ -839,7 +840,14 @@ void hw_perf_enable(void)
                            match_prev_assignment(hwc, cpuc, i))
                                continue;
 
-                       x86_pmu_stop(event);
+                       /*
+                        * Ensure we don't accidentally enable a stopped
+                        * counter simply because we rescheduled.
+                        */
+                       if (hwc->state & PERF_HES_STOPPED)
+                               hwc->state |= PERF_HES_ARCH;
+
+                       x86_pmu_stop(event, PERF_EF_UPDATE);
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
@@ -851,7 +859,10 @@ void hw_perf_enable(void)
                        else if (i < n_running)
                                continue;
 
-                       x86_pmu_start(event);
+                       if (hwc->state & PERF_HES_ARCH)
+                               continue;
+
+                       x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
@@ -952,15 +963,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
  * but only if it can be scheduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
@@ -969,57 +977,67 @@ static int x86_pmu_enable(struct perf_event *event)
 
        hwc = &event->hw;
 
+       perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
-       n = collect_events(cpuc, event, false);
-       if (n < 0)
-               return n;
+       ret = n = collect_events(cpuc, event, false);
+       if (ret < 0)
+               goto out;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
 
        /*
         * If group events scheduling transaction was started,
  * skip the schedulability test here, it will be performed
-        * at commit time(->commit_txn) as a whole
+        * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
-               goto out;
+               goto done_collect;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
-               return ret;
+               goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
 
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;
 
-       if (idx == -1)
-               return -EAGAIN;
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               x86_perf_event_set_period(event);
+       }
+
+       event->hw.state = 0;
 
-       x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
+       __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
-
-       return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-       int ret = x86_pmu_start(event);
-       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
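
The hunks around here convert the x86 PMU to the new add/del plus start/stop
interface, tracking per-event state in PERF_HES_STOPPED and
PERF_HES_UPTODATE so a stopped counter is never re-armed by accident and its
hardware delta is drained exactly once. A hedged userspace model of that
small state machine; the flag values are assumptions for the sketch:

#include <stdio.h>

#define PERF_HES_STOPPED  0x01
#define PERF_HES_UPTODATE 0x02

struct event { int state; long count; };

static void start(struct event *e) { e->state = 0; }

static void stop(struct event *e, int update)
{
        e->state |= PERF_HES_STOPPED;
        if (update && !(e->state & PERF_HES_UPTODATE)) {
                e->count += 42;               /* drain hardware delta */
                e->state |= PERF_HES_UPTODATE;
        }
}

int main(void)
{
        struct event e = { PERF_HES_STOPPED | PERF_HES_UPTODATE, 0 };

        start(&e);
        stop(&e, 1);
        stop(&e, 1);    /* idempotent: the count is drained only once */
        printf("count=%ld state=%#x\n", e.count, e.state);
        return 0;
}
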
@@ -1076,27 +1094,29 @@ void perf_event_print_debug(void)
        local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       if (!__test_and_clear_bit(idx, cpuc->active_mask))
-               return;
-
-       x86_pmu.disable(event);
 
-       /*
-        * Drain the remaining delta count out of an event
-        * that we are disabling:
-        */
-       x86_perf_event_update(event);
+       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+               x86_pmu.disable(event);
+               cpuc->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+       }
 
-       cpuc->events[idx] = NULL;
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               x86_perf_event_update(event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
@@ -1109,7 +1129,7 @@ static void x86_pmu_disable(struct perf_event *event)
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;
 
-       x86_pmu_stop(event);
+       x86_pmu_stop(event, PERF_EF_UPDATE);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
@@ -1132,7 +1152,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
-       struct hw_perf_event *hwc;
        int idx, handled = 0;
        u64 val;
 
@@ -1141,11 +1160,18 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask))
+               if (!test_bit(idx, cpuc->active_mask)) {
+                       /*
+                        * Although we deactivated the counter, some CPUs
+                        * might still deliver spurious interrupts that are
+                        * still in flight. Catch them:
+                        */
+                       if (__test_and_clear_bit(idx, cpuc->running))
+                               handled++;
                        continue;
+               }
 
                event = cpuc->events[idx];
-               hwc = &event->hw;
 
                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1154,14 +1180,14 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu_stop(event);
+                       x86_pmu_stop(event, 0);
        }
 
        if (handled)
@@ -1170,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        return handled;
 }
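
With the boolean return replaced by a count, the NMI path above can also account for counters that were just disabled but still have an interrupt in flight, via the new cpuc->running bitmask. A simplified sketch of the accounting, assuming <linux/bitops.h> and hypothetical sketch_* helpers:

static int sketch_count_handled(unsigned long *active, unsigned long *running,
				int num_counters)
{
	int idx, handled = 0;

	for (idx = 0; idx < num_counters; idx++) {
		if (!test_bit(idx, active)) {
			/* late NMI from a counter we already stopped */
			if (__test_and_clear_bit(idx, running))
				handled++;
			continue;
		}
		handled++;	/* live counter overflowed */
	}
	return handled;		/* 0 means "not ours" */
}
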
 
-void smp_perf_pending_interrupt(struct pt_regs *regs)
-{
-       irq_enter();
-       ack_APIC_irq();
-       inc_irq_stat(apic_pending_irqs);
-       perf_event_do_pending();
-       irq_exit();
-}
-
-void set_perf_event_pending(void)
-{
-#ifdef CONFIG_X86_LOCAL_APIC
-       if (!x86_pmu.apic || !x86_pmu_initialized())
-               return;
-
-       apic->send_IPI_self(LOCAL_PENDING_VECTOR);
-#endif
-}
-
 void perf_events_lapic_init(void)
 {
        if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1200,12 +1207,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1229,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown NMI */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back NMI: two events
+                * trigger 'simultaneously', raising two back-to-back
+                * NMIs. If the first NMI handles both, the second
+                * will be empty and daze the CPU. So we drop it to
+                * avoid false-positive 'unknown NMI' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next NMI could be a back-to-back NMI */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back NMIs: the
+                * first handles more than one counter, the second
+                * handles only one counter, and the third handles no
+                * counter.
+                *
+                * This is the second NMI, because the previous one was
+                * handling more than one counter. We mark the next
+                * (third) NMI and then drop it if it goes unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
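
The per-cpu pmu_nmi state above implements the back-to-back suppression: whenever one NMI services more than one counter (or continues a run of such NMIs), the next NMI number is marked so that, if it arrives unhandled as DIE_NMIUNKNOWN, it is swallowed instead of being reported. A condensed sketch of that bookkeeping, with the notifier return values from <linux/notifier.h> and everything else illustrative:

static int sketch_nmi_tail(struct pmu_nmi_state *s, unsigned int this_nmi,
			   int handled)
{
	if (!handled)
		return NOTIFY_DONE;

	if (handled > 1 || (s->marked == this_nmi && s->handled > 1)) {
		/* the next NMI may be the empty half of a b2b pair */
		s->marked  = this_nmi + 1;
		s->handled = handled;
	}
	return NOTIFY_STOP;
}
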
@@ -1345,7 +1385,6 @@ void __init init_hw_perf_events(void)
                x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
        }
        x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-       perf_max_events = x86_pmu.num_counters;
 
        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
@@ -1381,6 +1420,7 @@ void __init init_hw_perf_events(void)
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(x86_pmu_notifier);
 }
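
Registration is now explicit: instead of the core asking the arch for a pmu via hw_perf_event_init(), the driver publishes one struct pmu and the core probes it through ->event_init(), which returns -ENOENT for event types it does not own. A hedged sketch of the pattern (the x86_pmu_* names are defined later in this patch; sketch_* names are illustrative):

static struct pmu sketch_pmu = {
	.event_init	= x86_pmu_event_init,	/* -ENOENT for foreign types */
	/* .add/.del/.start/.stop/.read filled in as in the patch */
};

static int __init sketch_register(void)
{
	perf_pmu_register(&sketch_pmu);		/* single-argument form at this point */
	return 0;
}
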
 
@@ -1394,10 +1434,11 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test; it will be performed at commit time
  */
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
 }
@@ -1407,7 +1448,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1417,6 +1458,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
         */
        cpuc->n_added -= cpuc->n_txn;
        cpuc->n_events -= cpuc->n_txn;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1424,7 +1466,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 on success
  */
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
@@ -1446,22 +1488,10 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+       perf_pmu_enable(pmu);
        return 0;
 }
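
Note the pairing introduced above: start_txn() disables the PMU, and the PMU stays disabled for the whole group transaction until either commit_txn() (on success) or cancel_txn() (on failure) re-enables it. A sketch of how the core is expected to drive it, under the assumption that group members are added with ->add() in between:

static int sketch_group_sched_in(struct pmu *pmu)
{
	int ret;

	pmu->start_txn(pmu);		/* perf_pmu_disable() inside */

	/* ... pmu->add() each sibling; schedulability test deferred ... */

	ret = pmu->commit_txn(pmu);	/* test runs here; enables on success */
	if (ret)
		pmu->cancel_txn(pmu);	/* rolls back and re-enables */
	return ret;
}
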
 
-static const struct pmu pmu = {
-       .enable         = x86_pmu_enable,
-       .disable        = x86_pmu_disable,
-       .start          = x86_pmu_start,
-       .stop           = x86_pmu_stop,
-       .read           = x86_pmu_read,
-       .unthrottle     = x86_pmu_unthrottle,
-       .start_txn      = x86_pmu_start_txn,
-       .cancel_txn     = x86_pmu_cancel_txn,
-       .commit_txn     = x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1536,12 +1566,22 @@ out:
        return ret;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
-       const struct pmu *tmp;
+       struct pmu *tmp;
        int err;
 
-       err = __hw_perf_event_init(event);
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
+       err = __x86_pmu_event_init(event);
        if (!err) {
                /*
                 * we temporarily connect the event to its pmu
@@ -1561,26 +1601,31 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (err) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
 }
 
-/*
- * callchain support
- */
+static struct pmu pmu = {
+       .pmu_enable     = x86_pmu_enable,
+       .pmu_disable    = x86_pmu_disable,
 
-static inline
-void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
+       .event_init     = x86_pmu_event_init,
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+       .add            = x86_pmu_add,
+       .del            = x86_pmu_del,
+       .start          = x86_pmu_start,
+       .stop           = x86_pmu_stop,
+       .read           = x86_pmu_read,
 
+       .start_txn      = x86_pmu_start_txn,
+       .cancel_txn     = x86_pmu_cancel_txn,
+       .commit_txn     = x86_pmu_commit_txn,
+};
+
+/*
+ * callchain support
+ */
 
 static void
 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
@@ -1602,7 +1647,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry *entry = data;
 
-       callchain_store(entry, addr);
+       perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops backtrace_ops = {
@@ -1613,11 +1658,15 @@ static const struct stacktrace_ops backtrace_ops = {
        .walk_stack             = print_context_stack_bp,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->ip);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest OS callchains yet */
+               return;
+       }
+
+       perf_callchain_store(entry, regs->ip);
 
        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
@@ -1646,7 +1695,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if (fp < compat_ptr(regs->sp))
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = compat_ptr(frame.next_frame);
        }
        return 1;
@@ -1659,19 +1708,20 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #endif
 
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
        const void __user *fp;
 
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest OS callchains yet */
+               return;
+       }
 
        fp = (void __user *)regs->bp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->ip);
+       perf_callchain_store(entry, regs->ip);
 
        if (perf_callchain_user32(regs, entry))
                return;
@@ -1688,52 +1738,11 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if ((unsigned long)fp < regs->sp)
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
        }
 }
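
After the rework the generic core owns the callchain entry and the PERF_CONTEXT_* markers, so the arch code is reduced to the bare frame-pointer walk feeding perf_callchain_store(). A compressed sketch of the user-space walk, assuming the copy_from_user_nmi() helper used elsewhere in this file and an illustrative frame layout:

struct sketch_frame {
	struct sketch_frame __user	*next_frame;
	unsigned long			return_address;
};

static void sketch_user_walk(struct perf_callchain_entry *entry,
			     struct pt_regs *regs)
{
	struct sketch_frame frame;
	struct sketch_frame __user *fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
			break;
		if ((unsigned long)fp < regs->sp)
			break;	/* frame below the stack pointer: bogus */
		perf_callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}
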
 
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry;
-
-       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-               /* TODO: We don't support guest os callchain now */
-               return NULL;
-       }
-
-       if (in_nmi())
-               entry = &__get_cpu_var(pmc_nmi_entry);
-       else
-               entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
        unsigned long ip;
index c2897b7b4a3b1c7a6cfe4fe7e70b3d7378432d4c..46d58448c3aff9039fdf0b909d069d00d80ef75e 100644 (file)
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
+               [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
-               [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
+               [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
index d8d86d01400866c6320001fb715301276747cd52..c8f5c088cad11ae3f245e1e7374bb43c915170d6 100644 (file)
@@ -712,22 +712,24 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled;
 
        perf_sample_data_init(&data, 0);
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        intel_pmu_disable_all();
-       intel_pmu_drain_bts_buffer();
+       handled = intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
-               return 0;
+               return handled;
        }
 
        loops = 0;
 again:
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -758,11 +763,9 @@ again:
                data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu_stop(event);
+                       x86_pmu_stop(event, 0);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
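
The key ordering change in this hunk: the status bits are now acknowledged at the top of the retry loop, before they are serviced, so overflows that occur during handling set fresh bits and force another iteration instead of being lost; the handled count, in turn, includes BTS and PEBS work so the NMI accounting stays accurate. A schematic sketch with wholly hypothetical sketch_* helpers:

static int sketch_irq_loop(void)
{
	u64 status;
	int handled = 0;

	for (;;) {
		status = sketch_get_status();	/* hypothetical read of overflow bits */
		if (!status)
			break;
		sketch_ack_status(status);	/* ack before handling, not after */
		handled += sketch_service(status);
	}
	return handled;
}
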
index 18018d1311cdf3fa7f550b2fd7894c278505afe6..4977f9c400e5738cb668efc937c69cde22a2772d 100644 (file)
@@ -214,7 +214,7 @@ static void intel_pmu_disable_bts(void)
        update_debugctlmsr(debugctlmsr);
 }
 
-static void intel_pmu_drain_bts_buffer(void)
+static int intel_pmu_drain_bts_buffer(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
@@ -231,16 +231,16 @@ static void intel_pmu_drain_bts_buffer(void)
        struct pt_regs regs;
 
        if (!event)
-               return;
+               return 0;
 
        if (!ds)
-               return;
+               return 0;
 
        at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
        top = (struct bts_record *)(unsigned long)ds->bts_index;
 
        if (top <= at)
-               return;
+               return 0;
 
        ds->bts_index = ds->bts_buffer_base;
 
@@ -256,7 +256,7 @@ static void intel_pmu_drain_bts_buffer(void)
        perf_prepare_sample(&header, &data, event, &regs);
 
        if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
-               return;
+               return 1;
 
        for (; at < top; at++) {
                data.ip         = at->from;
@@ -270,6 +270,7 @@ static void intel_pmu_drain_bts_buffer(void)
        /* There's new data available. */
        event->hw.interrupts++;
        event->pending_kill = POLL_IN;
+       return 1;
 }
 
 /*
@@ -491,7 +492,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
                regs.flags &= ~PERF_EFLAGS_EXACT;
 
        if (perf_event_overflow(event, 1, &data, &regs))
-               x86_pmu_stop(event);
+               x86_pmu_stop(event, 0);
 }
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
index 7e578e9cc58bd5062d30776d431dabdcf724ac67..81400b93e69483e18bedf1c541753e2f915910cd 100644 (file)
@@ -18,6 +18,8 @@
 struct p4_event_bind {
        unsigned int opcode;                    /* Event code and ESCR selector */
        unsigned int escr_msr[2];               /* ESCR MSR for this event */
+       unsigned int escr_emask;                /* valid ESCR EventMask bits */
+       unsigned int shared;                    /* event is shared across threads */
        char cntr[2][P4_CNTR_LIMIT];            /* counter index (offset), -1 on absence */
 };
 
@@ -66,231 +68,435 @@ static struct p4_event_bind p4_event_bind_map[] = {
        [P4_EVENT_TC_DELIVER_MODE] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
+               .shared         = 1,
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_BPU_FETCH_REQUEST] = {
                .opcode         = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
                .escr_msr       = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_ITLB_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
                .escr_msr       = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_MEMORY_CANCEL] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MEMORY_COMPLETE] = {
                .opcode         = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
                .escr_msr       = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_LOAD_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_STORE_PORT_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
                .escr_msr       = { MSR_P4_SAAT_ESCR0 ,  MSR_P4_SAAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_MOB_LOAD_REPLAY] = {
                .opcode         = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
                .escr_msr       = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_PAGE_WALK_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
                .escr_msr       = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
+               .shared         = 1,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_CACHE_REFERENCE] = {
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ALLOCATION] = {
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN)                 |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER)               |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_IOQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_FSB_ESCR1,  MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB)          |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_FSB_DATA_ACTIVITY] = {
                .opcode         = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
+               .shared         = 1,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BSQ_ALLOCATION] = {           /* shared ESCR, broken CCCR1 */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
                .escr_msr       = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE)      |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE)      |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
                .cntr           = { {0, -1, -1}, {1, -1, -1} },
        },
        [P4_EVENT_BSQ_ACTIVE_ENTRIES] = {       /* shared ESCR */
                .opcode         = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
                .escr_msr       = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1)        |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE)     |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE)  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE)  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1)       |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
                .cntr           = { {2, -1, -1}, {3, -1, -1} },
        },
        [P4_EVENT_SSE_INPUT_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_PACKED_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_SP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_SCALAR_DP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_64BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_128BIT_MMX_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_X87_FP_UOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_FP_UOP),
                .escr_msr       = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_TC_MISC] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MISC),
                .escr_msr       = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_GLOBAL_POWER_EVENTS] = {
                .opcode         = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_TC_MS_XFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_TC_MS_XFER),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_UOP_QUEUE_WRITES] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
                .escr_msr       = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD)     |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER)   |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RETIRED_BRANCH_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
                .escr_msr       = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)         |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
                .cntr           = { {4, 5, -1}, {6, 7, -1} },
        },
        [P4_EVENT_RESOURCE_STALL] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
                .escr_msr       = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_WC_BUFFER] = {
                .opcode         = P4_OPCODE(P4_EVENT_WC_BUFFER),
                .escr_msr       = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS)               |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
+               .shared         = 1,
                .cntr           = { {8, 9, -1}, {10, 11, -1} },
        },
        [P4_EVENT_B2B_CYCLES] = {
                .opcode         = P4_OPCODE(P4_EVENT_B2B_CYCLES),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_BNR] = {
                .opcode         = P4_OPCODE(P4_EVENT_BNR),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_SNOOP] = {
                .opcode         = P4_OPCODE(P4_EVENT_SNOOP),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_RESPONSE] = {
                .opcode         = P4_OPCODE(P4_EVENT_RESPONSE),
                .escr_msr       = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+               .escr_emask     = 0,
                .cntr           = { {0, -1, -1}, {2, -1, -1} },
        },
        [P4_EVENT_FRONT_END_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_EXECUTION_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_REPLAY_EVENT] = {
                .opcode         = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)           |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)            |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOPS_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_UOP_TYPE] = {
                .opcode         = P4_OPCODE(P4_EVENT_UOP_TYPE),
                .escr_msr       = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS)                  |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
                .opcode         = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_X87_ASSIST] = {
                .opcode         = P4_OPCODE(P4_EVENT_X87_ASSIST),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU)                    |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_MACHINE_CLEAR] = {
                .opcode         = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
                .escr_msr       = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR)                |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR)              |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
        [P4_EVENT_INSTR_COMPLETED] = {
                .opcode         = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
                .escr_msr       = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+               .escr_emask     =
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS)             |
+                       P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
                .cntr           = { {12, 13, 16}, {14, 15, 17} },
        },
 };
@@ -428,29 +634,73 @@ static u64 p4_pmu_event_map(int hw_event)
        return config;
 }
 
+/* check CPU model specifics */
+static bool p4_event_match_cpu_model(unsigned int event_idx)
+{
+       /* INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */
+       if (event_idx == P4_EVENT_INSTR_COMPLETED) {
+               if (boot_cpu_data.x86_model != 3 &&
+                       boot_cpu_data.x86_model != 4 &&
+                       boot_cpu_data.x86_model != 6)
+                       return false;
+       }
+
+       /*
+        * For information:
+        * - IQ_ESCR0 and IQ_ESCR1 exist on models 1 and 2 only
+        */
+
+       return true;
+}
+
 static int p4_validate_raw_event(struct perf_event *event)
 {
-       unsigned int v;
+       unsigned int v, emask;
 
-       /* user data may have out-of-bound event index */
+       /* User data may have an out-of-bounds event index */
        v = p4_config_unpack_event(event->attr.config);
-       if (v >= ARRAY_SIZE(p4_event_bind_map)) {
-               pr_warning("P4 PMU: Unknown event code: %d\n", v);
+       if (v >= ARRAY_SIZE(p4_event_bind_map))
+               return -EINVAL;
+
+       /* It may be unsupported: */
+       if (!p4_event_match_cpu_model(v))
                return -EINVAL;
+
+       /*
+        * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
+        * in Architectural Performance Monitoring: it selects not
+        * on _which_ logical CPU to count but rather _when_, i.e. it
+        * depends on the logical CPU state -- count the event if one
+        * CPU is active, none, both or any. So we simply allow the
+        * user to pass any desired value.
+        *
+        * In turn, we always set the Tx_OS/Tx_USR bits bound to the
+        * local logical CPU, without propagating them to the other CPU.
+        */
+
+       /*
+        * If an event is shared across the logical threads,
+        * the user needs special permissions to be able to use it.
+        */
+       if (p4_event_bind_map[v].shared) {
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return -EACCES;
        }
 
+       /* ESCR EventMask bits may be invalid */
+       emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
+       if (emask & ~p4_event_bind_map[v].escr_emask)
+               return -EINVAL;
+
        /*
-        * it may have some screwed PEBS bits
+        * it may have some invalid PEBS bits
         */
-       if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) {
-               pr_warning("P4 PMU: PEBS are not supported yet\n");
+       if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
                return -EINVAL;
-       }
+
        v = p4_config_unpack_metric(event->attr.config);
-       if (v >= ARRAY_SIZE(p4_pebs_bind_map)) {
-               pr_warning("P4 PMU: Unknown metric code: %d\n", v);
+       if (v >= ARRAY_SIZE(p4_pebs_bind_map))
                return -EINVAL;
-       }
 
        return 0;
 }
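
The escr_emask fields added to p4_event_bind_map above act as per-event whitelists: a raw config is rejected as soon as the user sets an EventMask bit outside the template, replacing the old "trust the caller" policy. The core of that check, extracted into a standalone sketch:

static int sketch_check_emask(unsigned int user_emask,
			      unsigned int valid_emask)
{
	/* any bit not present in the event's template is invalid */
	if (user_emask & ~valid_emask)
		return -EINVAL;
	return 0;
}
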
@@ -478,27 +728,21 @@ static int p4_hw_config(struct perf_event *event)
 
        if (event->attr.type == PERF_TYPE_RAW) {
 
+               /*
+                * Clear the bits we reserve to be managed by the
+                * kernel itself and never allow them from user space.
+                */
+               event->attr.config &= P4_CONFIG_MASK;
+
                rc = p4_validate_raw_event(event);
                if (rc)
                        goto out;
 
                /*
-                * We don't control raw events so it's up to the caller
-                * to pass sane values (and we don't count the thread number
-                * on HT machine but allow HT-compatible specifics to be
-                * passed on)
-                *
                 * Note that for RAW events we allow the user to use P4_CCCR_RESERVED
                 * bits, since we keep additional info here (for cache events, etc.)
-                *
-                * XXX: HT wide things should check perf_paranoid_cpu() &&
-                *      CAP_SYS_ADMIN
                 */
-               event->hw.config |= event->attr.config &
-                       (p4_config_pack_escr(P4_ESCR_MASK_HT) |
-                        p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
-
-               event->hw.config &= ~P4_CCCR_FORCE_OVF;
+               event->hw.config |= event->attr.config;
        }
 
        rc = x86_setup_perfctr(event);
@@ -660,8 +904,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                int overflow;
 
-               if (!test_bit(idx, cpuc->active_mask))
+               if (!test_bit(idx, cpuc->active_mask)) {
+                       /* catch in-flight IRQs */
+                       if (__test_and_clear_bit(idx, cpuc->running))
+                               handled++;
                        continue;
+               }
 
                event = cpuc->events[idx];
                hwc = &event->hw;
@@ -692,7 +940,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index fb329e9f849443315e19c8db38c01161f5edf73b..d9f4ff8fcd693c509b2d079b381a8e8683d10d9c 100644 (file)
@@ -700,11 +700,10 @@ static void probe_nmi_watchdog(void)
 {
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
-               if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
-                   boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
-                       return;
-               wd_ops = &k7_wd_ops;
-               break;
+               if (boot_cpu_data.x86 == 6 ||
+                   (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15))
+                       wd_ops = &k7_wd_ops;
+               return;
        case X86_VENDOR_INTEL:
                /* Work around where perfctr1 doesn't have a working enable
                 * bit as described in the following errata:
index 34b4dad6f0b8e35a0fe80d01f8708c230592144d..c7f64e6f537a8dc5195e6d37082b205d6df5cfbd 100644 (file)
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
@@ -43,6 +44,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                { X86_FEATURE_LBRV,             CR_EDX, 1, 0x8000000a, 0 },
                { X86_FEATURE_SVML,             CR_EDX, 2, 0x8000000a, 0 },
                { X86_FEATURE_NRIPS,            CR_EDX, 3, 0x8000000a, 0 },
+               { X86_FEATURE_TSCRATEMSR,       CR_EDX, 4, 0x8000000a, 0 },
+               { X86_FEATURE_VMCBCLEAN,        CR_EDX, 5, 0x8000000a, 0 },
+               { X86_FEATURE_FLUSHBYASID,      CR_EDX, 6, 0x8000000a, 0 },
+               { X86_FEATURE_DECODEASSISTS,    CR_EDX, 7, 0x8000000a, 0 },
+               { X86_FEATURE_PAUSEFILTER,      CR_EDX,10, 0x8000000a, 0 },
+               { X86_FEATURE_PFTHRESHOLD,      CR_EDX,12, 0x8000000a, 0 },
                { 0, 0, 0, 0, 0 }
        };
 
index 045b36cada655370382231cb186d45d5d8820d95..994828899e098350d12ca73217235af843b0d497 100644 (file)
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!csize)
                return 0;
 
-       vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;
 
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        } else
                memcpy(buf, vaddr + offset, csize);
 
+       set_iounmap_nonlazy();
        iounmap(vaddr);
        return csize;
 }
index e5cc7e82e60ddbf1bd1ca2871fdb7d7fc7628e34..76b8cd953deed9f8a50d572cdc52b5edb68bc3b7 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
-#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -97,7 +96,6 @@ static void __init nvidia_bugs(int num, int slot, int func)
 
 }
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
 static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
 {
@@ -116,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
        d &= 0xff;
        return d;
 }
-#endif
 
 static void __init ati_bugs(int num, int slot, int func)
 {
@@ -192,21 +189,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
-       hpet_readback_cmp = 1;
-#endif
-}
-
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +218,6 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
        { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-       { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
-         PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
        {}
 };
 
index fa99bae75acee4d1efb12b141a24d97019e03b5f..4572f25f93255f8bb4a5e5158d3c9949f912b657 100644 (file)
@@ -14,6 +14,7 @@
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
+#include <asm/mrst.h>
 #include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
@@ -238,6 +239,18 @@ static int __init setup_early_printk(char *buf)
 #ifdef CONFIG_HVC_XEN
                if (!strncmp(buf, "xen", 3))
                        early_console_register(&xenboot_console, keep);
+#endif
+#ifdef CONFIG_X86_MRST_EARLY_PRINTK
+               if (!strncmp(buf, "mrst", 4)) {
+                       mrst_early_console_init();
+                       early_console_register(&early_mrst_console, keep);
+               }
+
+               if (!strncmp(buf, "hsu", 3)) {
+                       hsu_early_console_init();
+                       early_console_register(&early_hsu_console, keep);
+               }
+
 #endif
                buf++;
        }
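
With these hooks in place, the new consoles are selected from the kernel command line like any other early console, e.g.:

	earlyprintk=mrst,keep	# Max3110 SPI-UART console (Moorestown/Medfield)
	earlyprintk=hsu		# Medfield High Speed UART console

(",keep" retains the early console after the real one comes up; option handling is the same as for the existing serial/xen variants.)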
diff --git a/arch/x86/kernel/early_printk_mrst.c b/arch/x86/kernel/early_printk_mrst.c
new file mode 100644 (file)
index 0000000..65df603
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * early_printk_mrst.c - early consoles for Intel MID platforms
+ *
+ * Copyright (c) 2008-2010, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/*
+ * This file implements two early consoles named mrst and hsu.
+ * mrst is based on the MAX3110 SPI-UART device, which exists on both
+ * the Moorestown and Medfield platforms, while hsu is based on a High
+ * Speed UART device that exists only on the Medfield platform.
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/mrst.h>
+
+#define MRST_SPI_TIMEOUT               0x200000
+#define MRST_REGBASE_SPI0              0xff128000
+#define MRST_REGBASE_SPI1              0xff128400
+#define MRST_CLK_SPI0_REG              0xff11d86c
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET                 0
+
+#define SPI_FRF_OFFSET                 4
+#define SPI_FRF_SPI                    0x0
+#define SPI_FRF_SSP                    0x1
+#define SPI_FRF_MICROWIRE              0x2
+#define SPI_FRF_RESV                   0x3
+
+#define SPI_MODE_OFFSET                        6
+#define SPI_SCPH_OFFSET                        6
+#define SPI_SCOL_OFFSET                        7
+#define SPI_TMOD_OFFSET                        8
+#define        SPI_TMOD_TR                     0x0             /* xmit & recv */
+#define SPI_TMOD_TO                    0x1             /* xmit only */
+#define SPI_TMOD_RO                    0x2             /* recv only */
+#define SPI_TMOD_EPROMREAD             0x3             /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET               10
+#define SPI_SRL_OFFSET                 11
+#define SPI_CFS_OFFSET                 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK                                0x7f            /* cover 7 bits */
+#define SR_BUSY                                (1 << 0)
+#define SR_TF_NOT_FULL                 (1 << 1)
+#define SR_TF_EMPT                     (1 << 2)
+#define SR_RF_NOT_EMPT                 (1 << 3)
+#define SR_RF_FULL                     (1 << 4)
+#define SR_TX_ERR                      (1 << 5)
+#define SR_DCOL                                (1 << 6)
+
+struct dw_spi_reg {
+       u32     ctrl0;
+       u32     ctrl1;
+       u32     ssienr;
+       u32     mwcr;
+       u32     ser;
+       u32     baudr;
+       u32     txfltr;
+       u32     rxfltr;
+       u32     txflr;
+       u32     rxflr;
+       u32     sr;
+       u32     imr;
+       u32     isr;
+       u32     risr;
+       u32     txoicr;
+       u32     rxoicr;
+       u32     rxuicr;
+       u32     msticr;
+       u32     icr;
+       u32     dmacr;
+       u32     dmatdlr;
+       u32     dmardlr;
+       u32     idr;
+       u32     version;
+
+       /* Currently operates as 32 bits, though only the low 16 bits matter */
+       u32     dr;
+} __packed;
+
+#define dw_readl(dw, name)             __raw_readl(&(dw)->name)
+#define dw_writel(dw, name, val)       __raw_writel((val), &(dw)->name)
+
+/* Default to the SPI0 registers for mrst; we detect Penwell and switch to SPI1 */
+static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
+
+static u32 *pclk_spi0;
+/* Always contains an accessible address, start with 0 */
+static struct dw_spi_reg *pspi;
+
+static struct kmsg_dumper dw_dumper;
+static int dumper_registered;
+
+static void dw_kmsg_dump(struct kmsg_dumper *dumper,
+                       enum kmsg_dump_reason reason,
+                       const char *s1, unsigned long l1,
+                       const char *s2, unsigned long l2)
+{
+       int i;
+
+       /* If we end up here, re-initialize the HW first */
+       mrst_early_console_init();
+
+       for (i = 0; i < l1; i++)
+               early_mrst_console.write(&early_mrst_console, s1 + i, 1);
+       for (i = 0; i < l2; i++)
+               early_mrst_console.write(&early_mrst_console, s2 + i, 1);
+}
+
+/* Set the baud rate to 115200, 8n1, IRQ disabled */
+static void max3110_write_config(void)
+{
+       u16 config;
+
+       config = 0xc001;
+       dw_writel(pspi, dr, config);
+}
+
+/* Translate char to an eligible word and send to max3110 */
+static void max3110_write_data(char c)
+{
+       u16 data;
+
+       data = 0x8000 | c;
+       dw_writel(pspi, dr, data);
+}
+
+void mrst_early_console_init(void)
+{
+       u32 ctrlr0 = 0;
+       u32 spi0_cdiv;
+       u32 freq; /* Frequency info only needs to be read once */
+
+       /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
+       pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+                                                       MRST_CLK_SPI0_REG);
+       spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
+       freq = 100000000 / (spi0_cdiv + 1);
+
+       if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
+               mrst_spi_paddr = MRST_REGBASE_SPI1;
+
+       pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+                                               mrst_spi_paddr);
+
+       /* Disable SPI controller */
+       dw_writel(pspi, ssienr, 0);
+
+       /* Set control param, 8 bits, transmit only mode */
+       ctrlr0 = dw_readl(pspi, ctrl0);
+
+       ctrlr0 &= 0xfcc0;
+       ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
+                     | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
+       dw_writel(pspi, ctrl0, ctrlr0);
+
+       /*
+        * Change the spi0 clk to comply with 115200 bps; use 100000 to
+        * calculate the clk divisor so the clock runs a little slower
+        * than the real baud rate.
+        */
+       dw_writel(pspi, baudr, freq/100000);
+
+       /* Disable all INT for early phase */
+       dw_writel(pspi, imr, 0x0);
+
+       /* Set the cs to spi-uart */
+       dw_writel(pspi, ser, 0x2);
+
+       /* Enable the HW, the last step for HW init */
+       dw_writel(pspi, ssienr, 0x1);
+
+       /* Set the default configuration */
+       max3110_write_config();
+
+       /* Register the kmsg dumper */
+       if (!dumper_registered) {
+               dw_dumper.dump = dw_kmsg_dump;
+               kmsg_dump_register(&dw_dumper);
+               dumper_registered = 1;
+       }
+}
+
+/* Slave select should be asserted from the read/write functions */
+static void early_mrst_spi_putc(char c)
+{
+       unsigned int timeout;
+       u32 sr;
+
+       timeout = MRST_SPI_TIMEOUT;
+       /* Early putc needs to make sure the TX FIFO is not full */
+       while (--timeout) {
+               sr = dw_readl(pspi, sr);
+               if (!(sr & SR_TF_NOT_FULL))
+                       cpu_relax();
+               else
+                       break;
+       }
+
+       if (!timeout)
+               pr_warning("MRST earlycon: timed out\n");
+       else
+               max3110_write_data(c);
+}
+
+/* Early SPI only uses polling mode */
+static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
+{
+       int i;
+
+       for (i = 0; i < n && *str; i++) {
+               if (*str == '\n')
+                       early_mrst_spi_putc('\r');
+               early_mrst_spi_putc(*str);
+               str++;
+       }
+}
+
+struct console early_mrst_console = {
+       .name =         "earlymrst",
+       .write =        early_mrst_spi_write,
+       .flags =        CON_PRINTBUFFER,
+       .index =        -1,
+};
+
+/*
+ * Following is the early console based on Medfield HSU (High
+ * Speed UART) device.
+ */
+#define HSU_PORT2_PADDR                0xffa28180
+
+static void __iomem *phsu;
+
+void hsu_early_console_init(void)
+{
+       u8 lcr;
+
+       phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+                                                       HSU_PORT2_PADDR);
+
+       /* Disable FIFO */
+       writeb(0x0, phsu + UART_FCR);
+
+       /* Set to default 115200 bps, 8n1 */
+       lcr = readb(phsu + UART_LCR);
+       writeb((0x80 | lcr), phsu + UART_LCR);
+       writeb(0x18, phsu + UART_DLL);
+       writeb(lcr,  phsu + UART_LCR);
+       writel(0x3600, phsu + UART_MUL*4);
+
+       writeb(0x8, phsu + UART_MCR);
+       writeb(0x7, phsu + UART_FCR);
+       writeb(0x3, phsu + UART_LCR);
+
+       /* Clear IRQ status */
+       readb(phsu + UART_LSR);
+       readb(phsu + UART_RX);
+       readb(phsu + UART_IIR);
+       readb(phsu + UART_MSR);
+
+       /* Enable FIFO */
+       writeb(0x7, phsu + UART_FCR);
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static void early_hsu_putc(char ch)
+{
+       unsigned int timeout = 10000; /* 10ms */
+       u8 status;
+
+       while (--timeout) {
+               status = readb(phsu + UART_LSR);
+               if (status & BOTH_EMPTY)
+                       break;
+               udelay(1);
+       }
+
+       /* Only write the char when there was no timeout */
+       if (timeout)
+               writeb(ch, phsu + UART_TX);
+}
+
+static void early_hsu_write(struct console *con, const char *str, unsigned n)
+{
+       int i;
+
+       for (i = 0; i < n && *str; i++) {
+               if (*str == '\n')
+                       early_hsu_putc('\r');
+               early_hsu_putc(*str);
+               str++;
+       }
+}
+
+struct console early_hsu_console = {
+       .name =         "earlyhsu",
+       .write =        early_hsu_write,
+       .flags =        CON_PRINTBUFFER,
+       .index =        -1,
+};
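
Both consoles follow the usual early-console shape: a polling putc with a bounded busy-wait, a write method that expands \n into \r\n, and a struct console flagged CON_PRINTBUFFER so messages logged before registration are replayed. The mrst variant additionally registers a kmsg dumper that re-initializes the SPI hardware on panic and pushes the log out. Stripped to its skeleton (illustrative sketch only; names are hypothetical):

	static void my_putc(char c);	/* poll HW status, then emit one char */

	static void my_write(struct console *con, const char *s, unsigned n)
	{
		while (n-- && *s) {
			if (*s == '\n')
				my_putc('\r');	/* CRLF for terminals */
			my_putc(*s++);
		}
	}

	static struct console my_early_console = {
		.name	= "earlyfoo",
		.write	= my_write,
		.flags	= CON_PRINTBUFFER,	/* replay buffered messages */
		.index	= -1,
	};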
index 227d00920d2f8eaa1d90340ced0261ffe0a8f859..9fb188d7bc762a8101bffde7d32f8e4851512360 100644 (file)
 
  /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
 .endm
 .macro POP_GS pop=0
        addl $(4 + \pop), %esp
 #else  /* CONFIG_X86_32_LAZY_GS */
 
 .macro PUSH_GS
-       pushl %gs
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %gs
        /*CFI_REL_OFFSET gs, 0*/
 .endm
 
 .macro POP_GS pop=0
-98:    popl %gs
-       CFI_ADJUST_CFA_OFFSET -4
+98:    popl_cfi %gs
        /*CFI_RESTORE gs*/
   .if \pop <> 0
        add $\pop, %esp
 .macro SAVE_ALL
        cld
        PUSH_GS
-       pushl %fs
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %fs
        /*CFI_REL_OFFSET fs, 0;*/
-       pushl %es
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %es
        /*CFI_REL_OFFSET es, 0;*/
-       pushl %ds
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ds
        /*CFI_REL_OFFSET ds, 0;*/
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        CFI_REL_OFFSET eax, 0
-       pushl %ebp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebp
        CFI_REL_OFFSET ebp, 0
-       pushl %edi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edi
        CFI_REL_OFFSET edi, 0
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl %edx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edx
        CFI_REL_OFFSET edx, 0
-       pushl %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx, 0
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        movl $(__USER_DS), %edx
        movl %edx, %ds
 .endm
 
 .macro RESTORE_INT_REGS
-       popl %ebx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebx
        CFI_RESTORE ebx
-       popl %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ecx
        CFI_RESTORE ecx
-       popl %edx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edx
        CFI_RESTORE edx
-       popl %esi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %esi
        CFI_RESTORE esi
-       popl %edi
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %edi
        CFI_RESTORE edi
-       popl %ebp
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ebp
        CFI_RESTORE ebp
-       popl %eax
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %eax
        CFI_RESTORE eax
 .endm
 
 .macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
-1:     popl %ds
-       CFI_ADJUST_CFA_OFFSET -4
+1:     popl_cfi %ds
        /*CFI_RESTORE ds;*/
-2:     popl %es
-       CFI_ADJUST_CFA_OFFSET -4
+2:     popl_cfi %es
        /*CFI_RESTORE es;*/
-3:     popl %fs
-       CFI_ADJUST_CFA_OFFSET -4
+3:     popl_cfi %fs
        /*CFI_RESTORE fs;*/
        POP_GS \pop
 .pushsection .fixup, "ax"
 
 ENTRY(ret_from_fork)
        CFI_STARTPROC
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
-       popl %eax
-       CFI_ADJUST_CFA_OFFSET -4
-       pushl $0x0202                   # Reset kernel eflags
-       CFI_ADJUST_CFA_OFFSET 4
-       popfl
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %eax
+       pushl_cfi $0x0202               # Reset kernel eflags
+       popfl_cfi
        jmp syscall_exit
        CFI_ENDPROC
 END(ret_from_fork)
@@ -409,29 +382,23 @@ sysenter_past_esp:
         * enough kernel state to call TRACE_IRQS_OFF can be called - but
         * we immediately enable interrupts at that point anyway.
         */
-       pushl $(__USER_DS)
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $(__USER_DS)
        /*CFI_REL_OFFSET ss, 0*/
-       pushl %ebp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebp
        CFI_REL_OFFSET esp, 0
-       pushfl
+       pushfl_cfi
        orl $X86_EFLAGS_IF, (%esp)
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $(__USER_CS)
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $(__USER_CS)
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
-       pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
        CFI_REL_OFFSET eip, 0
 
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        SAVE_ALL
        ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -486,8 +453,7 @@ sysenter_audit:
        movl %eax,%edx                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
        call audit_syscall_entry
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call
 
@@ -529,8 +495,7 @@ ENDPROC(ia32_sysenter_target)
        # system call handler stub
 ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
-       pushl %eax                      # save orig_eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax                  # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / emulation
@@ -566,7 +531,6 @@ restore_all_notrace:
        je ldt_ss                       # returning to user-space with LDT SS
 restore_nocheck:
        RESTORE_REGS 4                  # skip orig_eax/error_code
-       CFI_ADJUST_CFA_OFFSET -4
 irq_return:
        INTERRUPT_RETURN
 .section .fixup,"ax"
@@ -619,10 +583,8 @@ ldt_ss:
        shr $16, %edx
        mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
        mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-       pushl $__ESPFIX_SS
-       CFI_ADJUST_CFA_OFFSET 4
-       push %eax                       /* new kernel esp */
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $__ESPFIX_SS
+       pushl_cfi %eax                  /* new kernel esp */
        /* Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer was already set to
         * the irqstate after the iret */
@@ -666,11 +628,9 @@ work_notifysig:                            # deal with pending signals and
 
        ALIGN
 work_notifysig_v86:
-       pushl %ecx                      # save ti_flags for do_notify_resume
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx                  # save ti_flags for do_notify_resume
        call save_v86_state             # %eax contains pt_regs pointer
-       popl %ecx
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %ecx
        movl %eax, %esp
 #else
        movl %esp, %eax
@@ -750,14 +710,18 @@ ptregs_##name: \
 #define PTREGSCALL3(name) \
        ALIGN; \
 ptregs_##name: \
+       CFI_STARTPROC; \
        leal 4(%esp),%eax; \
-       pushl %eax; \
+       pushl_cfi %eax; \
        movl PT_EDX(%eax),%ecx; \
        movl PT_ECX(%eax),%edx; \
        movl PT_EBX(%eax),%eax; \
        call sys_##name; \
        addl $4,%esp; \
-       ret
+       CFI_ADJUST_CFA_OFFSET -4; \
+       ret; \
+       CFI_ENDPROC; \
+ENDPROC(ptregs_##name)
 
 PTREGSCALL1(iopl)
 PTREGSCALL0(fork)
@@ -772,15 +736,19 @@ PTREGSCALL1(vm86old)
 /* Clone is an oddball.  The 4th arg is in %edi */
        ALIGN;
 ptregs_clone:
+       CFI_STARTPROC
        leal 4(%esp),%eax
-       pushl %eax
-       pushl PT_EDI(%eax)
+       pushl_cfi %eax
+       pushl_cfi PT_EDI(%eax)
        movl PT_EDX(%eax),%ecx
        movl PT_ECX(%eax),%edx
        movl PT_EBX(%eax),%eax
        call sys_clone
        addl $8,%esp
+       CFI_ADJUST_CFA_OFFSET -8
        ret
+       CFI_ENDPROC
+ENDPROC(ptregs_clone)
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -795,10 +763,8 @@ ptregs_clone:
        mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax                 /* the adjusted stack pointer */
-       pushl $__KERNEL_DS
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $__KERNEL_DS
+       pushl_cfi %eax
        lss (%esp), %esp                /* switch to the normal stack segment */
        CFI_ADJUST_CFA_OFFSET -8
 .endm
@@ -835,8 +801,7 @@ vector=FIRST_EXTERNAL_VECTOR
       .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -4
       .endif
-1:     pushl $(~vector+0x80)   /* Note: always in signed byte range */
-       CFI_ADJUST_CFA_OFFSET 4
+1:     pushl_cfi $(~vector+0x80)       /* Note: always in signed byte range */
       .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
       .endif
@@ -876,8 +841,7 @@ ENDPROC(common_interrupt)
 #define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name)                            \
        RING0_INT_FRAME;                \
-       pushl $~(nr);                   \
-       CFI_ADJUST_CFA_OFFSET 4;        \
+       pushl_cfi $~(nr);               \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
@@ -893,21 +857,18 @@ ENDPROC(name)
 
 ENTRY(coprocessor_error)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_coprocessor_error
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_coprocessor_error
        jmp error_code
        CFI_ENDPROC
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
        /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-661:   pushl $do_general_protection
+661:   pushl_cfi $do_general_protection
 662:
 .section .altinstructions,"a"
        .balign 4
@@ -922,19 +883,16 @@ ENTRY(simd_coprocessor_error)
 664:
 .previous
 #else
-       pushl $do_simd_coprocessor_error
+       pushl_cfi $do_simd_coprocessor_error
 #endif
-       CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
        RING0_INT_FRAME
-       pushl $-1                       # mark this as an int
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_device_not_available
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $-1                   # mark this as an int
+       pushl_cfi $do_device_not_available
        jmp error_code
        CFI_ENDPROC
 END(device_not_available)
@@ -956,82 +914,68 @@ END(native_irq_enable_sysexit)
 
 ENTRY(overflow)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_overflow
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_overflow
        jmp error_code
        CFI_ENDPROC
 END(overflow)
 
 ENTRY(bounds)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_bounds
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_bounds
        jmp error_code
        CFI_ENDPROC
 END(bounds)
 
 ENTRY(invalid_op)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_invalid_op
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_invalid_op
        jmp error_code
        CFI_ENDPROC
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_coprocessor_segment_overrun
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_coprocessor_segment_overrun
        jmp error_code
        CFI_ENDPROC
 END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
        RING0_EC_FRAME
-       pushl $do_invalid_TSS
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_invalid_TSS
        jmp error_code
        CFI_ENDPROC
 END(invalid_TSS)
 
 ENTRY(segment_not_present)
        RING0_EC_FRAME
-       pushl $do_segment_not_present
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_segment_not_present
        jmp error_code
        CFI_ENDPROC
 END(segment_not_present)
 
 ENTRY(stack_segment)
        RING0_EC_FRAME
-       pushl $do_stack_segment
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_stack_segment
        jmp error_code
        CFI_ENDPROC
 END(stack_segment)
 
 ENTRY(alignment_check)
        RING0_EC_FRAME
-       pushl $do_alignment_check
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_alignment_check
        jmp error_code
        CFI_ENDPROC
 END(alignment_check)
 
 ENTRY(divide_error)
        RING0_INT_FRAME
-       pushl $0                        # no error code
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_divide_error
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0                    # no error code
+       pushl_cfi $do_divide_error
        jmp error_code
        CFI_ENDPROC
 END(divide_error)
@@ -1039,10 +983,8 @@ END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl machine_check_vector
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi machine_check_vector
        jmp error_code
        CFI_ENDPROC
 END(machine_check)
@@ -1050,10 +992,8 @@ END(machine_check)
 
 ENTRY(spurious_interrupt_bug)
        RING0_INT_FRAME
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $do_spurious_interrupt_bug
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
+       pushl_cfi $do_spurious_interrupt_bug
        jmp error_code
        CFI_ENDPROC
 END(spurious_interrupt_bug)
@@ -1084,8 +1024,7 @@ ENTRY(xen_sysenter_target)
 
 ENTRY(xen_hypervisor_callback)
        CFI_STARTPROC
-       pushl $0
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $0
        SAVE_ALL
        TRACE_IRQS_OFF
 
@@ -1121,23 +1060,20 @@ ENDPROC(xen_hypervisor_callback)
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
        CFI_STARTPROC
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        movl $1,%eax
 1:     mov 4(%esp),%ds
 2:     mov 8(%esp),%es
 3:     mov 12(%esp),%fs
 4:     mov 16(%esp),%gs
        testl %eax,%eax
-       popl %eax
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %eax
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
        addl $16,%esp
        jmp iret_exc            # EAX != 0 => Category 2 (Bad IRET)
-5:     pushl $0                # EAX == 0 => Category 1 (Bad segment)
-       CFI_ADJUST_CFA_OFFSET 4
+5:     pushl_cfi $0            # EAX == 0 => Category 1 (Bad segment)
        SAVE_ALL
        jmp ret_from_exception
        CFI_ENDPROC
@@ -1287,40 +1223,29 @@ syscall_table_size=(.-sys_call_table)
 
 ENTRY(page_fault)
        RING0_EC_FRAME
-       pushl $do_page_fault
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_page_fault
        ALIGN
 error_code:
        /* the function address is in %gs's slot on the stack */
-       pushl %fs
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %fs
        /*CFI_REL_OFFSET fs, 0*/
-       pushl %es
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %es
        /*CFI_REL_OFFSET es, 0*/
-       pushl %ds
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ds
        /*CFI_REL_OFFSET ds, 0*/
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        CFI_REL_OFFSET eax, 0
-       pushl %ebp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebp
        CFI_REL_OFFSET ebp, 0
-       pushl %edi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edi
        CFI_REL_OFFSET edi, 0
-       pushl %esi
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %esi
        CFI_REL_OFFSET esi, 0
-       pushl %edx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %edx
        CFI_REL_OFFSET edx, 0
-       pushl %ecx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ecx
        CFI_REL_OFFSET ecx, 0
-       pushl %ebx
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        cld
        movl $(__KERNEL_PERCPU), %ecx
@@ -1362,12 +1287,9 @@ END(page_fault)
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        CFI_DEF_CFA esp, 0
        CFI_UNDEFINED eip
-       pushfl
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $__KERNEL_CS
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl $sysenter_past_esp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushfl_cfi
+       pushl_cfi $__KERNEL_CS
+       pushl_cfi $sysenter_past_esp
        CFI_REL_OFFSET eip, 0
 .endm
 
@@ -1377,8 +1299,7 @@ ENTRY(debug)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-       pushl $-1                       # mark this as an int
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $-1                   # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx                  # error code 0
@@ -1398,32 +1319,27 @@ END(debug)
  */
 ENTRY(nmi)
        RING0_INT_FRAME
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
-       popl %eax
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %eax
        je nmi_espfix_stack
        cmpl $ia32_sysenter_target,(%esp)
        je nmi_stack_fixup
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
-       popl %eax
-       CFI_ADJUST_CFA_OFFSET -4
+       popl_cfi %eax
        jae nmi_stack_correct
        cmpl $ia32_sysenter_target,12(%esp)
        je nmi_debug_stack_check
 nmi_stack_correct:
        /* We have a RING0_INT_FRAME here */
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        SAVE_ALL
        xorl %edx,%edx          # zero error code
        movl %esp,%eax          # pt_regs pointer
@@ -1452,18 +1368,14 @@ nmi_espfix_stack:
         *
         * create the pointer to lss back
         */
-       pushl %ss
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl %esp
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %ss
+       pushl_cfi %esp
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
-       pushl 16(%esp)
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi 16(%esp)
        .endr
-       pushl %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
@@ -1477,8 +1389,7 @@ END(nmi)
 
 ENTRY(int3)
        RING0_INT_FRAME
-       pushl $-1                       # mark this as an int
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $-1                   # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx          # zero error code
@@ -1490,8 +1401,7 @@ END(int3)
 
 ENTRY(general_protection)
        RING0_EC_FRAME
-       pushl $do_general_protection
-       CFI_ADJUST_CFA_OFFSET 4
+       pushl_cfi $do_general_protection
        jmp error_code
        CFI_ENDPROC
 END(general_protection)
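
The whole conversion in this file leans on helper macros that fold a stack operation and its DWARF CFI annotation into one line; they are presumably defined along these lines in arch/x86/include/asm/dwarf2.h (sketch, not quoted from this commit):

	.macro pushl_cfi reg
	pushl \reg
	CFI_ADJUST_CFA_OFFSET 4
	.endm

	.macro popl_cfi reg
	popl \reg
	CFI_ADJUST_CFA_OFFSET -4
	.endm

The 64-bit counterparts (pushq_cfi/popq_cfi, used in entry_64.S below) adjust the CFA by 8 instead of 4.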
index 17be5ec7cbbad332973b6b46a79cdb3db2832f74..a7ae7fd1010fdbddcd596aa0aae01ee160b7bb66 100644 (file)
@@ -213,23 +213,17 @@ ENDPROC(native_usergs_sysret64)
        .macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
-       pushq $__KERNEL_DS /* ss */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi $__KERNEL_DS /* ss */
        /*CFI_REL_OFFSET        ss,0*/
-       pushq %rax /* rsp */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi %rax /* rsp */
        CFI_REL_OFFSET  rsp,0
-       pushq $X86_EFLAGS_IF /* eflags - interrupts on */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
        /*CFI_REL_OFFSET        rflags,0*/
-       pushq $__KERNEL_CS /* cs */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi $__KERNEL_CS /* cs */
        /*CFI_REL_OFFSET        cs,0*/
-       pushq \child_rip /* rip */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi \child_rip /* rip */
        CFI_REL_OFFSET  rip,0
-       pushq   %rax /* orig rax */
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi %rax /* orig rax */
        .endm
 
        .macro UNFAKE_STACK_FRAME
@@ -398,10 +392,8 @@ ENTRY(ret_from_fork)
 
        LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-       push kernel_eflags(%rip)
-       CFI_ADJUST_CFA_OFFSET 8
-       popf                                    # reset kernel eflags
-       CFI_ADJUST_CFA_OFFSET -8
+       pushq_cfi kernel_eflags(%rip)
+       popfq_cfi                               # reset kernel eflags
 
        call schedule_tail                      # rdi: 'prev' task parameter
 
@@ -521,11 +513,9 @@ sysret_careful:
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq %rdi
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rdi
        call schedule
-       popq  %rdi
-       CFI_ADJUST_CFA_OFFSET -8
+       popq_cfi %rdi
        jmp sysret_check
 
        /* Handle a signal */
@@ -634,11 +624,9 @@ int_careful:
        jnc  int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq %rdi
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rdi
        call schedule
-       popq %rdi
-       CFI_ADJUST_CFA_OFFSET -8
+       popq_cfi %rdi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
@@ -652,12 +640,10 @@ int_check_syscall_exit_work:
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
-       pushq %rdi
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi %rdi
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
-       popq %rdi
-       CFI_ADJUST_CFA_OFFSET -8
+       popq_cfi %rdi
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest
 
@@ -714,9 +700,8 @@ END(ptregscall_common)
 
 ENTRY(stub_execve)
        CFI_STARTPROC
-       popq %r11
-       CFI_ADJUST_CFA_OFFSET -8
-       CFI_REGISTER rip, r11
+       addq $8, %rsp
+       PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        movq %rsp, %rcx
@@ -735,7 +720,7 @@ END(stub_execve)
 ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
-       CFI_ADJUST_CFA_OFFSET   -8
+       PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
@@ -766,8 +751,7 @@ vector=FIRST_EXTERNAL_VECTOR
       .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
       .endif
-1:     pushq $(~vector+0x80)   /* Note: always in signed byte range */
-       CFI_ADJUST_CFA_OFFSET 8
+1:     pushq_cfi $(~vector+0x80)       /* Note: always in signed byte range */
       .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
       .endif
@@ -796,8 +780,8 @@ END(interrupt)
 
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
-       subq $10*8, %rsp
-       CFI_ADJUST_CFA_OFFSET 10*8
+       subq $ORIG_RAX-ARGOFFSET+8, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
        call save_args
        PARTIAL_FRAME 0
        call \func
@@ -822,6 +806,7 @@ ret_from_intr:
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)
        leaveq
+       CFI_RESTORE             rbp
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
 exit_intr:
@@ -903,11 +888,9 @@ retint_careful:
        jnc   retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq %rdi
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi %rdi
        call  schedule
-       popq %rdi
-       CFI_ADJUST_CFA_OFFSET   -8
+       popq_cfi %rdi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -956,8 +939,7 @@ END(common_interrupt)
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
        INTR_FRAME
-       pushq $~(\num)
-       CFI_ADJUST_CFA_OFFSET 8
+       pushq_cfi $~(\num)
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
@@ -1023,9 +1005,9 @@ apicinterrupt ERROR_APIC_VECTOR \
 apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt
 
-#ifdef CONFIG_PERF_EVENTS
-apicinterrupt LOCAL_PENDING_VECTOR \
-       perf_pending_interrupt smp_perf_pending_interrupt
+#ifdef CONFIG_IRQ_WORK
+apicinterrupt IRQ_WORK_VECTOR \
+       irq_work_interrupt smp_irq_work_interrupt
 #endif
 
 /*
@@ -1036,8 +1018,8 @@ ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
-       subq $15*8,%rsp
-       CFI_ADJUST_CFA_OFFSET 15*8
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi          /* pt_regs pointer */
@@ -1052,9 +1034,9 @@ END(\sym)
 ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       pushq $-1               /* ORIG_RAX: no syscall to restart */
-       CFI_ADJUST_CFA_OFFSET 8
-       subq $15*8, %rsp
+       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
@@ -1070,9 +1052,9 @@ END(\sym)
 ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       pushq $-1               /* ORIG_RAX: no syscall to restart */
-       CFI_ADJUST_CFA_OFFSET 8
-       subq $15*8, %rsp
+       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
@@ -1089,8 +1071,8 @@ END(\sym)
 ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       subq $15*8,%rsp
-       CFI_ADJUST_CFA_OFFSET 15*8
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi                  /* pt_regs pointer */
@@ -1107,8 +1089,8 @@ END(\sym)
 ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       subq $15*8,%rsp
-       CFI_ADJUST_CFA_OFFSET 15*8
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        TRACE_IRQS_OFF
@@ -1139,16 +1121,14 @@ zeroentry simd_coprocessor_error do_simd_coprocessor_error
        /* edi:  new selector */
 ENTRY(native_load_gs_index)
        CFI_STARTPROC
-       pushf
-       CFI_ADJUST_CFA_OFFSET 8
+       pushfq_cfi
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
 gs_change:
        movl %edi,%gs
 2:     mfence          /* workaround */
        SWAPGS
-       popf
-       CFI_ADJUST_CFA_OFFSET -8
+       popfq_cfi
        ret
        CFI_ENDPROC
 END(native_load_gs_index)
@@ -1215,8 +1195,7 @@ END(kernel_execve)
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
        CFI_STARTPROC
-       push %rbp
-       CFI_ADJUST_CFA_OFFSET   8
+       pushq_cfi %rbp
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
@@ -1225,6 +1204,7 @@ ENTRY(call_softirq)
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
+       CFI_RESTORE             rbp
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
        decl PER_CPU_VAR(irq_count)
@@ -1368,7 +1348,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip)
 
        /* ebx: no swapgs flag */
 ENTRY(paranoid_exit)
-       INTR_FRAME
+       DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %ebx,%ebx                         /* swapgs needed? */
@@ -1445,7 +1425,6 @@ error_swapgs:
 error_sti:
        TRACE_IRQS_OFF
        ret
-       CFI_ENDPROC
 
 /*
  * There are two places in the kernel that can potentially fault with
@@ -1470,6 +1449,7 @@ bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
        jmp error_swapgs
+       CFI_ENDPROC
 END(error_entry)
 
 
@@ -1498,8 +1478,8 @@ ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1
-       subq $15*8, %rsp
-       CFI_ADJUST_CFA_OFFSET 15*8
+       subq $ORIG_RAX-R15, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
index cd37469b54eeed3fc479d2c931d8d2ed933ea411..3afb33f14d2d2c86a3c961d87aaae531d2631ac8 100644 (file)
@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
        return mod_code_status;
 }
 
-
-
-
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
 static unsigned char *ftrace_nop_replace(void)
 {
-       return ftrace_nop;
+       return ideal_nop5;
 }
 
 static int
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-       extern const unsigned char ftrace_test_p6nop[];
-       extern const unsigned char ftrace_test_nop5[];
-       extern const unsigned char ftrace_test_jmp[];
-       int faulted = 0;
-
-       /*
-        * There is no good nop for all x86 archs.
-        * We will default to using the P6_NOP5, but first we
-        * will test to make sure that the nop will actually
-        * work on this CPU. If it faults, we will then
-        * go to a lesser efficient 5 byte nop. If that fails
-        * we then just use a jmp as our nop. This isn't the most
-        * efficient nop, but we can not use a multi part nop
-        * since we would then risk being preempted in the middle
-        * of that nop, and if we enabled tracing then, it might
-        * cause a system crash.
-        *
-        * TODO: check the cpuid to determine the best nop.
-        */
-       asm volatile (
-               "ftrace_test_jmp:"
-               "jmp ftrace_test_p6nop\n"
-               "nop\n"
-               "nop\n"
-               "nop\n"  /* 2 byte jmp + 3 bytes */
-               "ftrace_test_p6nop:"
-               P6_NOP5
-               "jmp 1f\n"
-               "ftrace_test_nop5:"
-               ".byte 0x66,0x66,0x66,0x66,0x90\n"
-               "1:"
-               ".section .fixup, \"ax\"\n"
-               "2:     movl $1, %0\n"
-               "       jmp ftrace_test_nop5\n"
-               "3:     movl $2, %0\n"
-               "       jmp 1b\n"
-               ".previous\n"
-               _ASM_EXTABLE(ftrace_test_p6nop, 2b)
-               _ASM_EXTABLE(ftrace_test_nop5, 3b)
-               : "=r"(faulted) : "0" (faulted));
-
-       switch (faulted) {
-       case 0:
-               pr_info("converting mcount calls to 0f 1f 44 00 00\n");
-               memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
-               break;
-       case 1:
-               pr_info("converting mcount calls to 66 66 66 66 90\n");
-               memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
-               break;
-       case 2:
-               pr_info("converting mcount calls to jmp . + 5\n");
-               memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
-               break;
-       }
-
        /* The return code is returned via data */
        *(unsigned long *)data = 0;
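
The deleted probe picked at boot between three 5-byte nops, depending on which ones the CPU could execute without faulting; that choice now happens once in common code and is published as ideal_nop5, so ftrace simply reuses it. The three candidates, reconstructed from the removed code (byte values per the removed pr_info messages; the jmp encoding is an inference from the "2 byte jmp + 3 bytes" comment):

	static const unsigned char p6_nop5[]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	static const unsigned char alt_nop5[] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
	static const unsigned char jmp_nop5[] = { 0xeb, 0x03, 0x90, 0x90, 0x90 }; /* jmp .+5 */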
 
index 351f9c0fea1f20714321d01260003173faf548dc..7494999141b3ae9c9ab67302481a2e6c15e969e6 100644 (file)
@@ -35,7 +35,6 @@
 unsigned long                          hpet_address;
 u8                                     hpet_blockid; /* OS timer block num */
 u8                                     hpet_msi_disable;
-u8                                     hpet_readback_cmp;
 
 #ifdef CONFIG_PCI_MSI
 static unsigned long                   hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
         * at that point and we would wait for the next hpet interrupt
         * forever. We found out that reading the CMP register back
         * forces the transfer so we can rely on the comparison with
-        * the counter register below.
+        * the counter register below. If the readback from the
+        * compare register does not match the value we programmed,
+        * then we might have a real hardware problem. We cannot do
+        * much about it here, but at least alert the user/admin with
+        * a prominent warning.
         *
-        * That works fine on those ATI chipsets, but on newer Intel
-        * chipsets (ICH9...) this triggers due to an erratum: Reading
-        * the comparator immediately following a write is returning
-        * the old value.
+        * An erratum on some chipsets (ICH9, ...) causes a comparator
+        * read immediately following a write to return the old value.
+        * The workaround is to read the register a second time when
+        * the first read has returned the old value.
         *
-        * We restrict the read back to the affected ATI chipsets (set
-        * by quirks) and also run it with hpet=verbose for debugging
-        * purposes.
+        * In fact the write to the comparator register is delayed by
+        * up to two HPET cycles, so our attempted workaround of
+        * restricting the readback to the known-broken ATI chipsets
+        * failed miserably. So we give up on optimizations forever
+        * and penalize all HPET incarnations unconditionally.
         */
-       if (hpet_readback_cmp || hpet_verbose) {
-               u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
-               if (cmp != cnt)
+       if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+               if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
                        printk_once(KERN_WARNING
-                           "hpet: compare register read back failed.\n");
+                               "hpet: compare register read back failed.\n");
        }
 
        return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
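
In isolation, the new readback logic is a double read: a first mismatch may just be the read-after-write erratum, while a second mismatch is treated as a real hardware problem. A sketch of the equivalent flow:

	u32 cmp = hpet_readl(HPET_Tn_CMP(timer));

	if (cmp != cnt)		/* possibly just the stale-read erratum */
		cmp = hpet_readl(HPET_Tn_CMP(timer));
	if (cmp != cnt)		/* still wrong: warn about broken HW */
		printk_once(KERN_WARNING
			"hpet: compare register read back failed.\n");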
@@ -503,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
 {
        unsigned int irq;
 
-       irq = create_irq();
+       irq = create_irq_nr(0, -1);
        if (!irq)
                return -EINVAL;
 
index a474ec37c32f84df372d39eac5730532d60d0228..ff15c9dcc25de8be8144fd4d15f1dfd3069314e4 100644 (file)
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
 {
-       /* Len */
-       switch (x86_len) {
-       case X86_BREAKPOINT_LEN_X:
+       /* Type */
+       switch (x86_type) {
+       case X86_BREAKPOINT_EXECUTE:
+               if (x86_len != X86_BREAKPOINT_LEN_X)
+                       return -EINVAL;
+
+               *gen_type = HW_BREAKPOINT_X;
                *gen_len = sizeof(long);
+               return 0;
+       case X86_BREAKPOINT_WRITE:
+               *gen_type = HW_BREAKPOINT_W;
                break;
+       case X86_BREAKPOINT_RW:
+               *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Len */
+       switch (x86_len) {
        case X86_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
                return -EINVAL;
        }
 
-       /* Type */
-       switch (x86_type) {
-       case X86_BREAKPOINT_EXECUTE:
-               *gen_type = HW_BREAKPOINT_X;
-               break;
-       case X86_BREAKPOINT_WRITE:
-               *gen_type = HW_BREAKPOINT_W;
-               break;
-       case X86_BREAKPOINT_RW:
-               *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
-               break;
-       default:
-               return -EINVAL;
-       }
-
        return 0;
 }
 
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        ret = -EINVAL;
 
        switch (info->len) {
-       case X86_BREAKPOINT_LEN_X:
-               align = sizeof(long) -1;
-               break;
        case X86_BREAKPOINT_LEN_1:
                align = 0;
                break;
index a46cb3522c0c19d67f2d474c6e1f09fb616004ff..58bb239a2fd76d67121bd1c25e48512bad529825 100644 (file)
@@ -68,19 +68,22 @@ static void __cpuinit init_thread_xstate(void)
         */
 
        if (!HAVE_HWFP) {
+               /*
+                * Disable xsave, since we do not support it when
+                * i387 emulation is enabled.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
                xstate_size = sizeof(struct i387_soft_struct);
                return;
        }
 
        if (cpu_has_fxsr)
                xstate_size = sizeof(struct i387_fxsave_struct);
-#ifdef CONFIG_X86_32
        else
                xstate_size = sizeof(struct i387_fsave_struct);
-#endif
 }
 
-#ifdef CONFIG_X86_64
 /*
  * Called at bootup to set up the initial FPU state that is later cloned
  * into all processes.
@@ -88,12 +91,21 @@ static void __cpuinit init_thread_xstate(void)
 
 void __cpuinit fpu_init(void)
 {
-       unsigned long oldcr0 = read_cr0();
-
-       set_in_cr4(X86_CR4_OSFXSR);
-       set_in_cr4(X86_CR4_OSXMMEXCPT);
+       unsigned long cr0;
+       unsigned long cr4_mask = 0;
 
-       write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
+       if (cpu_has_fxsr)
+               cr4_mask |= X86_CR4_OSFXSR;
+       if (cpu_has_xmm)
+               cr4_mask |= X86_CR4_OSXMMEXCPT;
+       if (cr4_mask)
+               set_in_cr4(cr4_mask);
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
+       if (!HAVE_HWFP)
+               cr0 |= X86_CR0_EM;
+       write_cr0(cr0);
 
        if (!smp_processor_id())
                init_thread_xstate();
@@ -104,24 +116,12 @@ void __cpuinit fpu_init(void)
        clear_used_math();
 }
 
-#else  /* CONFIG_X86_64 */
-
-void __cpuinit fpu_init(void)
-{
-       if (!smp_processor_id())
-               init_thread_xstate();
-}
-
-#endif /* CONFIG_X86_32 */
-
 void fpu_finit(struct fpu *fpu)
 {
-#ifdef CONFIG_X86_32
        if (!HAVE_HWFP) {
                finit_soft_fpu(&fpu->state->soft);
                return;
        }
-#endif
 
        if (cpu_has_fxsr) {
                struct i387_fxsave_struct *fx = &fpu->state->fxsave;
@@ -386,19 +386,17 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
 #ifdef CONFIG_X86_64
        env->fip = fxsave->rip;
        env->foo = fxsave->rdp;
+       /*
+        * should be actually ds/cs at fpu exception time, but
+        * that information is not available in 64bit mode.
+        */
+       env->fcs = task_pt_regs(tsk)->cs;
        if (tsk == current) {
-               /*
-                * should be actually ds/cs at fpu exception time, but
-                * that information is not available in 64bit mode.
-                */
-               asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
-               asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
+               savesegment(ds, env->fos);
        } else {
-               struct pt_regs *regs = task_pt_regs(tsk);
-
-               env->fos = 0xffff0000 | tsk->thread.ds;
-               env->fcs = regs->cs;
+               env->fos = tsk->thread.ds;
        }
+       env->fos |= 0xffff0000;
 #else
        env->fip = fxsave->fip;
        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
index 91fd0c70a18abddc28eff75e287c70dd5d37af7d..44edb03fc9ec6d046f608c0cf1416cc034bab2fe 100644 (file)
@@ -67,10 +67,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");
-       seq_printf(p, "%*s: ", prec, "PND");
+       seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
-       seq_printf(p, "  Performance pending work\n");
+               seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
+       seq_printf(p, "  IRQ work interrupts\n");
 #endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
@@ -185,7 +185,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
-       sum += irq_stats(cpu)->apic_pending_irqs;
+       sum += irq_stats(cpu)->apic_irq_work_irqs;
 #endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
new file mode 100644 (file)
index 0000000..ca8f703
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * x86 specific code for irq_work
+ *
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+#include <asm/apic.h>
+
+void smp_irq_work_interrupt(struct pt_regs *regs)
+{
+       irq_enter();
+       ack_APIC_irq();
+       inc_irq_stat(apic_irq_work_irqs);
+       irq_work_run();
+       irq_exit();
+}
+
+void arch_irq_work_raise(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       if (!cpu_has_apic)
+               return;
+
+       apic->send_IPI_self(IRQ_WORK_VECTOR);
+       apic_wait_icr_idle();
+#endif
+}
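
arch_irq_work_raise() self-IPIs so that work queued from NMI or other hard-to-service contexts runs in a sane interrupt context shortly afterwards, where smp_irq_work_interrupt() drains it via irq_work_run(). A minimal user of the generic API introduced alongside this (sketch; assumes the irq_work API of this series):

	#include <linux/irq_work.h>

	static void my_callback(struct irq_work *work)
	{
		pr_info("ran safely outside NMI context\n");
	}

	static struct irq_work my_work = {
		.func = my_callback,
	};

	/* e.g. from an NMI handler: */
	irq_work_queue(&my_work);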
index 990ae7cfc5783f131df476506bc9341574a466c2..713969b9266b587c824e37be65d7467280a5da2e 100644 (file)
@@ -224,9 +224,9 @@ static void __init apic_intr_init(void)
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 
-       /* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_EVENTS
-       alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+       /* IRQ work interrupts: */
+# ifdef CONFIG_IRQ_WORK
+       alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
 # endif
 
 #endif
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..961b6b3
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * jump label x86 support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/cpu.h>
+#include <asm/kprobes.h>
+#include <asm/alternative.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+union jump_code_union {
+       char code[JUMP_LABEL_NOP_SIZE];
+       struct {
+               char jump;
+               int offset;
+       } __attribute__((packed));
+};
+
+void arch_jump_label_transform(struct jump_entry *entry,
+                              enum jump_label_type type)
+{
+       union jump_code_union code;
+
+       if (type == JUMP_LABEL_ENABLE) {
+               code.jump = 0xe9;
+               code.offset = entry->target -
+                               (entry->code + JUMP_LABEL_NOP_SIZE);
+       } else
+               memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+       text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+       text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+}
+
+#endif
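
Enabling a jump label rewrites the 5-byte ideal nop into e9 <rel32>, an x86 near jump whose displacement is relative to the end of the instruction. A worked example with hypothetical addresses:

	/* entry->code = 0x1000, entry->target = 0x2000 */
	code.jump   = 0xe9;
	code.offset = 0x2000 - (0x1000 + JUMP_LABEL_NOP_SIZE); /* = 0x0ffb */

text_poke_smp() then patches the live kernel text under text_mutex with CPU hotplug blocked, so the transition is safe against concurrent execution.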
index 770ebfb349e93efe3367cf0c6caff93b61b8b884..1cbd54c0df99189548a3a03f40fbb75a1703475a 100644 (file)
@@ -230,9 +230,6 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
        return 0;
 }
 
-/* Dummy buffers for kallsyms_lookup */
-static char __dummy_buf[KSYM_NAME_LEN];
-
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
@@ -241,7 +238,7 @@ static int __kprobes can_probe(unsigned long paddr)
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
 
-       if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
+       if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
                return 0;
 
        /* Decode instructions */
@@ -1129,7 +1126,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
        *(unsigned long *)addr = val;
 }
 
-void __kprobes kprobes_optinsn_template_holder(void)
+static void __used __kprobes kprobes_optinsn_template_holder(void)
 {
        asm volatile (
                        ".global optprobe_template_entry\n"
@@ -1221,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
        }
        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
-           alternatives_text_reserved(src, src + len - 1))
+           alternatives_text_reserved(src, src + len - 1) ||
+           jump_label_text_reserved(src, src + len - 1))
                return -EBUSY;
 
        return len;
@@ -1269,11 +1267,9 @@ static int __kprobes can_optimize(unsigned long paddr)
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
-       /* Dummy buffers for lookup_symbol_attrs */
-       static char __dummy_buf[KSYM_NAME_LEN];
 
        /* Lookup symbol including addr */
-       if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;
 
        /* Check there is enough space for a relative jump. */
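
kallsyms_lookup() requires a KSYM_NAME_LEN name buffer even when the caller only wants size and offset, which is what forced the static __dummy_buf here (and made these paths non-reentrant). kallsyms_lookup_size_offset() returns just the two numbers:

	unsigned long size, offset;

	/* non-zero iff paddr falls inside a known symbol */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;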
index 035c8c529181fa351c042f8d6a5b8ec3240dec8f..b3ea9db39db6f7ee9f9dab00632f754d8a75827d 100644 (file)
@@ -36,7 +36,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                if (!page)
                        goto out;
                pud = (pud_t *)page_address(page);
-               memset(pud, 0, PAGE_SIZE);
+               clear_page(pud);
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, addr);
@@ -45,7 +45,7 @@ static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                if (!page)
                        goto out;
                pmd = (pmd_t *)page_address(page);
-               memset(pmd, 0, PAGE_SIZE);
+               clear_page(pmd);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, addr);
index e0bc186d7501f123265ae288ae071e772016e89b..8f295609173524cdf06a20c6c0ad4fbe1de12263 100644 (file)
@@ -239,11 +239,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                apply_paravirt(pseg, pseg + para->sh_size);
        }
 
-       return module_bug_finalize(hdr, sechdrs, me);
+       /* make jump label nops */
+       jump_label_apply_nops(me);
+
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        alternatives_smp_module_del(mod);
-       module_bug_cleanup(mod);
 }
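
Note that the module BUG-table hooks disappear from the arch callbacks here; that bookkeeping moves into the generic module loader, so module_finalize() is left with the single jump-label duty of rewriting the freshly loaded module's patch sites to ideal nops via jump_label_apply_nops().
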
diff --git a/arch/x86/kernel/olpc-xo1.c b/arch/x86/kernel/olpc-xo1.c
new file mode 100644 (file)
index 0000000..f5442c0
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Support for features of the OLPC XO-1 laptop
+ *
+ * Copyright (C) 2010 One Laptop per Child
+ * Copyright (C) 2006 Red Hat, Inc.
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <asm/io.h>
+#include <asm/olpc.h>
+
+#define DRV_NAME "olpc-xo1"
+
+#define PMS_BAR                4
+#define ACPI_BAR       5
+
+/* PMC registers (PMS block) */
+#define PM_SCLK                0x10
+#define PM_IN_SLPCTL   0x20
+#define PM_WKXD                0x34
+#define PM_WKD         0x30
+#define PM_SSC         0x54
+
+/* PM registers (ACPI block) */
+#define PM1_CNT                0x08
+#define PM_GPE0_STS    0x18
+
+static unsigned long acpi_base;
+static unsigned long pms_base;
+
+static void xo1_power_off(void)
+{
+       printk(KERN_INFO "OLPC XO-1 power off sequence...\n");
+
+       /* Enable all of these controls with 0 delay */
+       outl(0x40000000, pms_base + PM_SCLK);
+       outl(0x40000000, pms_base + PM_IN_SLPCTL);
+       outl(0x40000000, pms_base + PM_WKXD);
+       outl(0x40000000, pms_base + PM_WKD);
+
+       /* Clear status bits (possibly unnecessary) */
+       outl(0x0002ffff, pms_base  + PM_SSC);
+       outl(0xffffffff, acpi_base + PM_GPE0_STS);
+
+       /* Write SLP_EN bit to start the machinery */
+       outl(0x00002000, acpi_base + PM1_CNT);
+}
+
+/* Read the base addresses from the PCI BAR info */
+static int __devinit setup_bases(struct pci_dev *pdev)
+{
+       int r;
+
+       r = pci_enable_device_io(pdev);
+       if (r) {
+               dev_err(&pdev->dev, "can't enable device IO\n");
+               return r;
+       }
+
+       r = pci_request_region(pdev, ACPI_BAR, DRV_NAME);
+       if (r) {
+               dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", ACPI_BAR);
+               return r;
+       }
+
+       r = pci_request_region(pdev, PMS_BAR, DRV_NAME);
+       if (r) {
+               dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", PMS_BAR);
+               pci_release_region(pdev, ACPI_BAR);
+               return r;
+       }
+
+       acpi_base = pci_resource_start(pdev, ACPI_BAR);
+       pms_base = pci_resource_start(pdev, PMS_BAR);
+
+       return 0;
+}
+
+static int __devinit olpc_xo1_probe(struct platform_device *pdev)
+{
+       struct pci_dev *pcidev;
+       int r;
+
+       pcidev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
+                               NULL);
+       if (!pcidev)
+               return -ENODEV;
+
+       r = setup_bases(pcidev);
+       if (r)
+               return r;
+
+       pm_power_off = xo1_power_off;
+
+       printk(KERN_INFO "OLPC XO-1 support registered\n");
+       return 0;
+}
+
+static int __devexit olpc_xo1_remove(struct platform_device *pdev)
+{
+       pm_power_off = NULL;
+       return 0;
+}
+
+static struct platform_driver olpc_xo1_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = olpc_xo1_probe,
+       .remove = __devexit_p(olpc_xo1_remove),
+};
+
+static int __init olpc_xo1_init(void)
+{
+       return platform_driver_register(&olpc_xo1_driver);
+}
+
+static void __exit olpc_xo1_exit(void)
+{
+       platform_driver_unregister(&olpc_xo1_driver);
+}
+
+MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:olpc-xo1");
+
+module_init(olpc_xo1_init);
+module_exit(olpc_xo1_exit);
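
One subtlety in the probe above: pci_get_device() returns its struct pci_dev with a reference held, and nothing in this driver drops it. A hedged sketch of a balanced variant, reusing setup_bases() and xo1_power_off() from this file, with pci_dev_put() as the only addition:

    static int __devinit olpc_xo1_probe_balanced(struct platform_device *pdev)
    {
            struct pci_dev *pcidev;
            int r;

            pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
                                    PCI_DEVICE_ID_AMD_CS5536_ISA, NULL);
            if (!pcidev)
                    return -ENODEV;

            r = setup_bases(pcidev);
            pci_dev_put(pcidev);    /* acpi_base/pms_base are cached */
            if (r)
                    return r;

            pm_power_off = xo1_power_off;
            return 0;
    }
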
index 0e0cdde519be93a3cda8bab4ba6d8637632171ef..edaf3fe8dc5e4b0326c0280042b868cd3400bf4c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/string.h>
+#include <linux/platform_device.h>
 
 #include <asm/geode.h>
 #include <asm/setup.h>
@@ -114,6 +115,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
        unsigned long flags;
        int ret = -EIO;
        int i;
+       int restarts = 0;
 
        spin_lock_irqsave(&ec_lock, flags);
 
@@ -169,7 +171,9 @@ restart:
                        if (wait_on_obf(0x6c, 1)) {
                                printk(KERN_ERR "olpc-ec:  timeout waiting for"
                                                " EC to provide data!\n");
-                               goto restart;
+                               if (restarts++ < 10)
+                                       goto restart;
+                               goto err;
                        }
                        outbuf[i] = inb(0x68);
                        pr_devel("olpc-ec:  received 0x%x\n", outbuf[i]);
@@ -183,8 +187,21 @@ err:
 }
 EXPORT_SYMBOL_GPL(olpc_ec_cmd);
 
-#ifdef CONFIG_OLPC_OPENFIRMWARE
-static void __init platform_detect(void)
+static bool __init check_ofw_architecture(void)
+{
+       size_t propsize;
+       char olpc_arch[5];
+       const void *args[] = { NULL, "architecture", olpc_arch, (void *)5 };
+       void *res[] = { &propsize };
+
+       if (olpc_ofw("getprop", args, res)) {
+               printk(KERN_ERR "ofw: getprop call failed!\n");
+               return false;
+       }
+       return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0;
+}
+
+static u32 __init get_board_revision(void)
 {
        size_t propsize;
        __be32 rev;
@@ -193,45 +210,43 @@ static void __init platform_detect(void)
 
        if (olpc_ofw("getprop", args, res) || propsize != 4) {
                printk(KERN_ERR "ofw: getprop call failed!\n");
-               rev = cpu_to_be32(0);
+               return cpu_to_be32(0);
        }
-       olpc_platform_info.boardrev = be32_to_cpu(rev);
+       return be32_to_cpu(rev);
 }
-#else
-static void __init platform_detect(void)
+
+static bool __init platform_detect(void)
 {
-       /* stopgap until OFW support is added to the kernel */
-       olpc_platform_info.boardrev = olpc_board(0xc2);
+       if (!check_ofw_architecture())
+               return false;
+       olpc_platform_info.flags |= OLPC_F_PRESENT;
+       olpc_platform_info.boardrev = get_board_revision();
+       return true;
 }
-#endif
 
-static int __init olpc_init(void)
+static int __init add_xo1_platform_devices(void)
 {
-       unsigned char *romsig;
+       struct platform_device *pdev;
 
-       /* The ioremap check is dangerous; limit what we run it on */
-       if (!is_geode() || cs5535_has_vsa2())
-               return 0;
+       pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
 
-       spin_lock_init(&ec_lock);
+       pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
 
-       romsig = ioremap(0xffffffc0, 16);
-       if (!romsig)
-               return 0;
+       return 0;
+}
 
-       if (strncmp(romsig, "CL1   Q", 7))
-               goto unmap;
-       if (strncmp(romsig+6, romsig+13, 3)) {
-               printk(KERN_INFO "OLPC BIOS signature looks invalid.  "
-                               "Assuming not OLPC\n");
-               goto unmap;
-       }
+static int __init olpc_init(void)
+{
+       int r = 0;
 
-       printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig);
-       olpc_platform_info.flags |= OLPC_F_PRESENT;
+       if (!olpc_ofw_present() || !platform_detect())
+               return 0;
 
-       /* get the platform revision */
-       platform_detect();
+       spin_lock_init(&ec_lock);
 
        /* assume B1 and above models always have a DCON */
        if (olpc_board_at_least(olpc_board(0xb1)))
@@ -242,8 +257,10 @@ static int __init olpc_init(void)
                        (unsigned char *) &olpc_platform_info.ecver, 1);
 
 #ifdef CONFIG_PCI_OLPC
-       /* If the VSA exists let it emulate PCI, if not emulate in kernel */
-       if (!cs5535_has_vsa2())
+       /* If the VSA exists let it emulate PCI, if not emulate in kernel.
+        * XO-1 only. */
+       if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
+                       !cs5535_has_vsa2())
                x86_init.pci.arch_init = pci_olpc_init;
 #endif
 
@@ -252,8 +269,12 @@ static int __init olpc_init(void)
                        olpc_platform_info.boardrev >> 4,
                        olpc_platform_info.ecver);
 
-unmap:
-       iounmap(romsig);
+       if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
+               r = add_xo1_platform_devices();
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
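
check_ofw_architecture() and get_board_revision() above both follow the OFW client-interface convention used throughout this file: a fixed argument array (phandle slot, property name, destination buffer, buffer length), a result array of out-pointers, and a zero return from olpc_ofw() on success. A sketch reading a different property under the same convention ("model" and the helper name are illustrative, not part of this change):

    #include <linux/types.h>
    #include <linux/init.h>
    #include <asm/olpc_ofw.h>

    static bool __init read_ofw_model(char *buf, size_t buflen)
    {
            size_t propsize;
            const void *args[] = { NULL, "model", buf, (void *)buflen };
            void *res[] = { &propsize };

            if (olpc_ofw("getprop", args, res))
                    return false;           /* CIF call failed */
            return propsize <= buflen;      /* property fit in the buffer */
    }
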
 
index 3218aa71ab5ebb7dd20a7d1dd94cfd88dcb3d755..787320464379f976626e22fb2d62a83730ccbf52 100644 (file)
@@ -74,6 +74,12 @@ int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
 }
 EXPORT_SYMBOL_GPL(__olpc_ofw);
 
+bool olpc_ofw_present(void)
+{
+       return olpc_ofw_cif != NULL;
+}
+EXPORT_SYMBOL_GPL(olpc_ofw_present);
+
 /* OFW cif _should_ be above this address */
 #define OFW_MIN 0xff000000
 
index 1db183ed7c01cb929cef9dc3fc6e1f1153841e59..c5b250011fd479aa8ecc8f8348ade0b096a7aa08 100644 (file)
@@ -413,7 +413,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 
        .alloc_pte = paravirt_nop,
        .alloc_pmd = paravirt_nop,
-       .alloc_pmd_clone = paravirt_nop,
        .alloc_pud = paravirt_nop,
        .release_pte = paravirt_nop,
        .release_pmd = paravirt_nop,
index 0f7f130caa6778d6077868e6e768b88f1439fc24..c562207b1b3da8c6abcf1d85416e4c22f078a1be 100644 (file)
@@ -39,7 +39,7 @@
 #include <asm/cacheflush.h>
 #include <asm/swiotlb.h>
 #include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
 static unsigned long iommu_bus_base;   /* GART remapping area (physical) */
@@ -560,8 +560,11 @@ static void enable_gart_translations(void)
 {
        int i;
 
-       for (i = 0; i < num_k8_northbridges; i++) {
-               struct pci_dev *dev = k8_northbridges[i];
+       if (!k8_northbridges.gart_supported)
+               return;
+
+       for (i = 0; i < k8_northbridges.num; i++) {
+               struct pci_dev *dev = k8_northbridges.nb_misc[i];
 
                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
@@ -592,16 +595,19 @@ static void gart_fixup_northbridges(struct sys_device *dev)
        if (!fix_up_north_bridges)
                return;
 
+       if (!k8_northbridges.gart_supported)
+               return;
+
        pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-       for (i = 0; i < num_k8_northbridges; i++) {
-               struct pci_dev *dev = k8_northbridges[i];
+       for (i = 0; i < k8_northbridges.num; i++) {
+               struct pci_dev *dev = k8_northbridges.nb_misc[i];
 
                /*
                 * Don't enable translations just yet.  That is the next
                 * step.  Restore the pre-suspend aperture settings.
                 */
-               pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
+               gart_set_size_and_enable(dev, aperture_order);
                pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
        }
 }
@@ -649,8 +655,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
-       for (i = 0; i < num_k8_northbridges; i++) {
-               dev = k8_northbridges[i];
+       for (i = 0; i < k8_northbridges.num; i++) {
+               dev = k8_northbridges.nb_misc[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;
@@ -718,10 +724,13 @@ static void gart_iommu_shutdown(void)
        if (!no_agp)
                return;
 
-       for (i = 0; i < num_k8_northbridges; i++) {
+       if (!k8_northbridges.gart_supported)
+               return;
+
+       for (i = 0; i < k8_northbridges.num; i++) {
                u32 ctl;
 
-               dev = k8_northbridges[i];
+               dev = k8_northbridges.nb_misc[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
                ctl &= ~GARTEN;
@@ -739,7 +748,7 @@ int __init gart_iommu_init(void)
        unsigned long scratch;
        long i;
 
-       if (num_k8_northbridges == 0)
+       if (!k8_northbridges.gart_supported)
                return 0;
 
 #ifndef CONFIG_AGP_AMD64
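
Every GART walker in this file now goes through the same two steps: bail out unless k8_northbridges.gart_supported is set, then index k8_northbridges.nb_misc[] up to k8_northbridges.num. A sketch of that idiom, assuming only the three fields visible in this diff:

    #include <linux/pci.h>
    #include <asm/amd_nb.h>

    /* hypothetical helper: apply fn to each northbridge misc device */
    static void nb_for_each_misc(void (*fn)(struct pci_dev *dev))
    {
            int i;

            if (!k8_northbridges.gart_supported)
                    return;

            for (i = 0; i < k8_northbridges.num; i++)
                    fn(k8_northbridges.nb_misc[i]);
    }
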
diff --git a/arch/x86/kernel/pmtimer_64.c b/arch/x86/kernel/pmtimer_64.c
deleted file mode 100644 (file)
index b112406..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Ported over from i386 by AK, original copyright was:
- *
- * (C) Dominik Brodowski <linux@brodo.de> 2003
- *
- * Driver to use the Power Management Timer (PMTMR) available in some
- * southbridges as primary timing source for the Linux kernel.
- *
- * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
- * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
- *
- * This file is licensed under the GPL v2.
- *
- * Dropped all the hardware bug workarounds for now. Hopefully they
- * are not needed on 64bit chipsets.
- */
-
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/cpumask.h>
-#include <linux/acpi_pmtmr.h>
-
-#include <asm/io.h>
-#include <asm/proto.h>
-#include <asm/msr.h>
-#include <asm/vsyscall.h>
-
-static inline u32 cyc2us(u32 cycles)
-{
-       /* The Power Management Timer ticks at 3.579545 ticks per microsecond.
-        * 1 / PM_TIMER_FREQUENCY == 0.27936511 =~ 286/1024 [error: 0.024%]
-        *
-        * Even with HZ = 100, delta is at maximum 35796 ticks, so it can
-        * easily be multiplied with 286 (=0x11E) without having to fear
-        * u32 overflows.
-        */
-       cycles *= 286;
-       return (cycles >> 10);
-}
-
-static unsigned pmtimer_wait_tick(void)
-{
-       u32 a, b;
-       for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
-            a == b;
-            b = inl(pmtmr_ioport) & ACPI_PM_MASK)
-               cpu_relax();
-       return b;
-}
-
-/* note: wait time is rounded up to one tick */
-void pmtimer_wait(unsigned us)
-{
-       u32 a, b;
-       a = pmtimer_wait_tick();
-       do {
-               b = inl(pmtmr_ioport);
-               cpu_relax();
-       } while (cyc2us(b - a) < us);
-}
-
-static int __init nopmtimer_setup(char *s)
-{
-       pmtmr_ioport = 0;
-       return 1;
-}
-
-__setup("nopmtimer", nopmtimer_setup);
index 3d9ea531ddd1bfa8cc9e2e28fbfa8a22f461bc20..b3d7a3a04f389d9626837a1e776c5106e821f7dc 100644 (file)
@@ -424,7 +424,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        load_TLS(next, cpu);
 
        /* Must be after DS reload */
-       unlazy_fpu(prev_p);
+       __unlazy_fpu(prev_p);
 
        /* Make sure cpu is ready for new context */
        if (preload_fpu)
index e3af342fe83ae7a57b8f6db6de4fb9541c4ab15e..7a4cf14223ba3288384909657d49edff7371623e 100644 (file)
@@ -84,7 +84,7 @@ static int __init reboot_setup(char *str)
                        }
                                /* we will leave sorting out the final value
                                   when we are ready to reboot, since we might not
-                                  have set up boot_cpu_id or smp_num_cpu */
+                                  have detected BSP APIC ID or smp_num_cpu */
                        break;
 #endif /* CONFIG_SMP */
 
index c3a4fbb2b996d00277d6523cb76b74e2c5944621..a59f6a6df5e25d104651bbeaf7202b8063f205bd 100644 (file)
@@ -83,7 +83,6 @@
 #include <asm/dmi.h>
 #include <asm/io_apic.h>
 #include <asm/ist.h>
-#include <asm/vmi.h>
 #include <asm/setup_arch.h>
 #include <asm/bios_ebda.h>
 #include <asm/cacheflush.h>
 #include <asm/percpu.h>
 #include <asm/topology.h>
 #include <asm/apicdef.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #ifdef CONFIG_X86_64
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
+#include <asm/alternative.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -125,7 +125,6 @@ unsigned long max_pfn_mapped;
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
 
-unsigned int boot_cpu_id __read_mostly;
 
 static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
 unsigned long _brk_end = (unsigned long)__brk_base;
@@ -618,79 +617,7 @@ static __init void reserve_ibft_region(void)
                reserve_early_overlap_ok(addr, addr + size, "ibft");
 }
 
-#ifdef CONFIG_X86_RESERVE_LOW_64K
-static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
-{
-       printk(KERN_NOTICE
-               "%s detected: BIOS may corrupt low RAM, working around it.\n",
-               d->ident);
-
-       e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
-       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
-       return 0;
-}
-#endif
-
-/* List of systems that have known low memory corruption BIOS problems */
-static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
-#ifdef CONFIG_X86_RESERVE_LOW_64K
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "AMI BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
-               },
-       },
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "Phoenix BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
-               },
-       },
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "Phoenix/MSC BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
-               },
-       },
-       /*
-        * AMI BIOS with low memory corruption was found on Intel DG45ID and
-        * DG45FC boards.
-        * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
-        * match only DMI_BOARD_NAME and see if there is more bad products
-        * with this vendor.
-        */
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "AMI BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
-               },
-       },
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "AMI BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
-               },
-       },
-       /*
-        * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
-        * match on the product name.
-        */
-       {
-               .callback = dmi_low_memory_corruption,
-               .ident = "Phoenix BIOS",
-               .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
-               },
-       },
-#endif
-       {}
-};
+static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 
 static void __init trim_bios_range(void)
 {
@@ -698,8 +625,14 @@ static void __init trim_bios_range(void)
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
+        *
+        * This typically reserves additional memory (64KiB by default)
+        * since some BIOSes are known to corrupt low memory.  See the
+        * Kconfig help text for X86_RESERVE_LOW.
         */
-       e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);
+       e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
+                         E820_RAM, E820_RESERVED);
+
        /*
         * special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
@@ -709,6 +642,28 @@ static void __init trim_bios_range(void)
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
 
+static int __init parse_reservelow(char *p)
+{
+       unsigned long long size;
+
+       if (!p)
+               return -EINVAL;
+
+       size = memparse(p, &p);
+
+       if (size < 4096)
+               size = 4096;
+
+       if (size > 640*1024)
+               size = 640*1024;
+
+       reserve_low = size;
+
+       return 0;
+}
+
+early_param("reservelow", parse_reservelow);
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -726,6 +681,7 @@ void __init setup_arch(char **cmdline_p)
 {
        int acpi = 0;
        int k8 = 0;
+       unsigned long flags;
 
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -734,10 +690,10 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
-       /* VMI may relocate the fixmap; do this before touching ioremap area */
-       vmi_init();
-
-       /* OFW also may relocate the fixmap */
+       /*
+        * If we have OLPC OFW, we might end up relocating the fixmap due to
+        * reserve_top(), so do this before touching the ioremap area.
+        */
        olpc_ofw_detect();
 
        early_trap_init();
@@ -838,9 +794,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_report_nx();
 
-       /* Must be before kernel pagetables are setup */
-       vmi_activate();
-
        /* after early param, so could get panic from serial */
        reserve_early_setup_data();
 
@@ -863,8 +816,6 @@ void __init setup_arch(char **cmdline_p)
 
        dmi_scan_machine();
 
-       dmi_check_system(bad_bios_dmi_table);
-
        /*
         * VMware detection requires dmi to be available, so this
         * needs to be done after dmi_scan_machine, for the BP.
@@ -1071,6 +1022,10 @@ void __init setup_arch(char **cmdline_p)
        x86_init.oem.banner();
 
        mcheck_init();
+
+       local_irq_save(flags);
+       arch_init_ideal_nop5();
+       local_irq_restore(flags);
 }
 
 #ifdef CONFIG_X86_32
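
The dropped DMI blacklist and the new reservelow= parameter attack the same low-memory-corrupting BIOSes from opposite directions: instead of matching known-bad vendors, the first CONFIG_X86_RESERVE_LOW KiB (the << 10 converts the Kconfig value to bytes) are unconditionally excluded from usable RAM, and the amount can be overridden on the command line. memparse() accepts the usual size suffixes and parse_reservelow() clamps the result to the 4 KiB..640 KiB range, so for example:

    reservelow=64k          # keep the first 64 KiB out of E820 RAM
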
index a60df9ae645440789181acd318e12bd4c450aeff..2335c15c93a4c514a0ea7d42e08b7874c1cd28d9 100644 (file)
@@ -253,7 +253,7 @@ void __init setup_per_cpu_areas(void)
                 * Up to this point, the boot CPU has been using .init.data
                 * area.  Reload any changed state for the boot CPU.
                 */
-               if (cpu == boot_cpu_id)
+               if (!cpu)
                        switch_to_new_gdt(cpu);
        }
 
index cb22acf3ed099de3a256f2f482ae1cdcd5338320..dd4c281ffe5720c3ff15f1eceaa09759e17df7d1 100644 (file)
@@ -34,7 +34,7 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 
-void __init mp_sfi_register_lapic_address(unsigned long address)
+static void __init mp_sfi_register_lapic_address(unsigned long address)
 {
        mp_lapic_addr = address;
 
@@ -46,7 +46,7 @@ void __init mp_sfi_register_lapic_address(unsigned long address)
 }
 
 /* All CPUs enumerated by SFI must be present and enabled */
-void __cpuinit mp_sfi_register_lapic(u8 id)
+static void __cpuinit mp_sfi_register_lapic(u8 id)
 {
        if (MAX_APICS - id <= 0) {
                pr_warning("Processor #%d invalid (max %d)\n",
index 8b3bfc4dd70872680ff4b451a8b03903bd68727b..2ced73ba048c2af73a242952739bb7b26ea4ff55 100644 (file)
@@ -62,7 +62,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
-#include <asm/vmi.h>
+#include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
@@ -311,7 +311,6 @@ notrace static void __cpuinit start_secondary(void *unused)
        __flush_tlb_all();
 #endif
 
-       vmi_bringup();
        cpu_init();
        preempt_disable();
        smp_callin();
@@ -397,6 +396,19 @@ void __cpuinit smp_store_cpu_info(int id)
                identify_secondary_cpu(c);
 }
 
+static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
+{
+       struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
+       struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
+
+       cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
+       cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
+       cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
+       cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
+       cpumask_set_cpu(cpu1, c2->llc_shared_map);
+       cpumask_set_cpu(cpu2, c1->llc_shared_map);
+}
+
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
@@ -409,14 +421,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                for_each_cpu(i, cpu_sibling_setup_mask) {
                        struct cpuinfo_x86 *o = &cpu_data(i);
 
-                       if (c->phys_proc_id == o->phys_proc_id &&
-                           c->cpu_core_id == o->cpu_core_id) {
-                               cpumask_set_cpu(i, cpu_sibling_mask(cpu));
-                               cpumask_set_cpu(cpu, cpu_sibling_mask(i));
-                               cpumask_set_cpu(i, cpu_core_mask(cpu));
-                               cpumask_set_cpu(cpu, cpu_core_mask(i));
-                               cpumask_set_cpu(i, c->llc_shared_map);
-                               cpumask_set_cpu(cpu, o->llc_shared_map);
+                       if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+                               if (c->phys_proc_id == o->phys_proc_id &&
+                                   c->compute_unit_id == o->compute_unit_id)
+                                       link_thread_siblings(cpu, i);
+                       } else if (c->phys_proc_id == o->phys_proc_id &&
+                                  c->cpu_core_id == o->cpu_core_id) {
+                               link_thread_siblings(cpu, i);
                        }
                }
        } else {
@@ -1109,8 +1120,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
-       enable_IR_x2apic();
-       default_setup_apic_routing();
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
@@ -1118,6 +1127,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
                goto out;
        }
 
+       default_setup_apic_routing();
+
        preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
@@ -1383,11 +1394,88 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int highest_cstate = 0;
+       unsigned int highest_subcstate = 0;
+       int i;
+       void *mwait_ptr;
+
+       if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+               return;
+       if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+               return;
+       if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return;
+
+       eax = CPUID_MWAIT_LEAF;
+       ecx = 0;
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       /*
+        * eax will be 0 if EDX enumeration is not valid.
+        * Otherwise it is initialized below to the (cstate, sub_cstate)
+        * pair of the deepest state that EDX advertises.
+        */
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+               eax = 0;
+       } else {
+               edx >>= MWAIT_SUBSTATE_SIZE;
+               for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+                       if (edx & MWAIT_SUBSTATE_MASK) {
+                               highest_cstate = i;
+                               highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+                       }
+               }
+               eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+                       (highest_subcstate - 1);
+       }
+
+       /*
+        * This should be a memory location in a cache line which is
+        * unlikely to be touched by other processors.  The actual
+        * content is immaterial as it is not actually modified in any way.
+        */
+       mwait_ptr = &current_thread_info()->flags;
+
+       wbinvd();
+
+       while (1) {
+               /*
+                * The CLFLUSH is a workaround for erratum AAI65 for
+                * the Xeon 7400 series.  It's not clear it is actually
+                * needed, but it should be harmless in either case.
+                * The WBINVD is insufficient due to the spurious-wakeup
+                * case where we return around the loop.
+                */
+               clflush(mwait_ptr);
+               __monitor(mwait_ptr, 0, 0);
+               mb();
+               __mwait(eax, 0);
+       }
+}
+
+static inline void hlt_play_dead(void)
+{
+       if (current_cpu_data.x86 >= 4)
+               wbinvd();
+
+       while (1) {
+               native_halt();
+       }
+}
+
 void native_play_dead(void)
 {
        play_dead_common();
        tboot_shutdown(TB_SHUTDOWN_WFS);
-       wbinvd_halt();
+
+       mwait_play_dead();      /* Only returns on failure */
+       hlt_play_dead();
 }
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
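
mwait_play_dead() above derives its MWAIT hint from CPUID leaf 5: EDX carries one 4-bit substate count per C-state (C0 in bits 3:0, C1 in bits 7:4, and so on), and the deepest advertised state is packed into the hint with its highest substate below it. A standalone sketch of that selection, assuming the 4-bit field width that MWAIT_SUBSTATE_SIZE and MWAIT_SUBSTATE_MASK describe:

    #include <stdint.h>
    #include <stdio.h>

    #define SUBSTATE_BITS 4                 /* assumed MWAIT_SUBSTATE_SIZE */
    #define SUBSTATE_MASK 0xf               /* assumed MWAIT_SUBSTATE_MASK */

    static uint32_t deepest_mwait_hint(uint32_t edx)
    {
            uint32_t hint = 0;
            int i;

            edx >>= SUBSTATE_BITS;          /* skip the C0 field */
            for (i = 0; i < 7 && edx; i++, edx >>= SUBSTATE_BITS) {
                    uint32_t substates = edx & SUBSTATE_MASK;

                    if (substates)
                            hint = (i << SUBSTATE_BITS) | (substates - 1);
            }
            return hint;
    }

    int main(void)
    {
            /* EDX = 0x220: two C1 and two C2 substates -> hint 0x11 */
            printf("hint = %#x\n", (unsigned)deepest_mwait_hint(0x220));
            return 0;
    }
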
index d5e06624e34a556552585690b4532509fe21552f..0b0cb5fede1993d8dc45b54ffcf27ff8732c3082 100644 (file)
@@ -33,8 +33,8 @@ int kernel_execve(const char *filename,
                  const char *const envp[])
 {
        long __res;
-       asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
+       asm volatile ("int $0x80"
        : "=a" (__res)
-       : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
+       : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
        return __res;
 }
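
The rewritten kernel_execve() leans directly on the classic i386 int $0x80 convention: syscall number in %eax, first three arguments in %ebx/%ecx/%edx, return value back in %eax. The old push/movl/pop dance kept %ebx out of the constraint list, historically required when the compiler reserved that register (PIC builds); naming it with the "b" constraint lets GCC handle any saving itself. A userspace illustration of the same convention (32-bit, non-PIC builds only):

    #include <stdio.h>
    #include <sys/syscall.h>

    static long raw_write(int fd, const void *buf, unsigned long len)
    {
            long ret;

            asm volatile ("int $0x80"
                          : "=a" (ret)
                          : "0" (SYS_write), "b" (fd), "c" (buf), "d" (len)
                          : "memory");
            return ret;
    }

    int main(void)
    {
            raw_write(1, "hello\n", 6);
            return 0;
    }
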
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index 60788dee0f8a74f53d547c462d5694201b6d609d..d43968503dd22fc67a35570fcf0b92b337637c09 100644 (file)
@@ -776,21 +776,10 @@ asmlinkage void math_state_restore(void)
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
-#ifndef CONFIG_MATH_EMULATION
-void math_emulate(struct math_emu_info *info)
-{
-       printk(KERN_EMERG
-               "math-emulation not enabled and no coprocessor found.\n");
-       printk(KERN_EMERG "killing %s.\n", current->comm);
-       force_sig(SIGFPE, current);
-       schedule();
-}
-#endif /* CONFIG_MATH_EMULATION */
-
 dotraplinkage void __kprobes
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };
 
@@ -798,12 +787,12 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 
                info.regs = regs;
                math_emulate(&info);
-       } else {
-               math_state_restore(); /* interrupts still off */
-               conditional_sti(regs);
+               return;
        }
-#else
-       math_state_restore();
+#endif
+       math_state_restore(); /* interrupts still off */
+#ifdef CONFIG_X86_32
+       conditional_sti(regs);
 #endif
 }
 
@@ -881,18 +870,6 @@ void __init trap_init(void)
 #endif
 
 #ifdef CONFIG_X86_32
-       if (cpu_has_fxsr) {
-               printk(KERN_INFO "Enabling fast FPU save and restore... ");
-               set_in_cr4(X86_CR4_OSFXSR);
-               printk("done.\n");
-       }
-       if (cpu_has_xmm) {
-               printk(KERN_INFO
-                       "Enabling unmasked SIMD FPU exception support... ");
-               set_in_cr4(X86_CR4_OSXMMEXCPT);
-               printk("done.\n");
-       }
-
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
 #endif
index d632934cb6386947352650f262745eb3c93c68ce..0c40d8b72416ba2ef7e86bfd812b7bf6f1db2f8f 100644 (file)
@@ -104,10 +104,14 @@ int __init notsc_setup(char *str)
 
 __setup("notsc", notsc_setup);
 
+static int no_sched_irq_time;
+
 static int __init tsc_setup(char *str)
 {
        if (!strcmp(str, "reliable"))
                tsc_clocksource_reliable = 1;
+       if (!strncmp(str, "noirqtime", 9))
+               no_sched_irq_time = 1;
        return 1;
 }
 
@@ -655,7 +659,7 @@ void restore_sched_clock_state(void)
 
        local_irq_save(flags);
 
-       get_cpu_var(cyc2ns_offset) = 0;
+       __get_cpu_var(cyc2ns_offset) = 0;
        offset = cyc2ns_suspend - sched_clock();
 
        for_each_possible_cpu(cpu)
@@ -801,6 +805,7 @@ void mark_tsc_unstable(char *reason)
        if (!tsc_unstable) {
                tsc_unstable = 1;
                sched_clock_stable = 0;
+               disable_sched_clock_irqtime();
                printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
                /* Change only the rating, when not registered */
                if (clocksource_tsc.mult)
@@ -892,60 +897,6 @@ static void __init init_tsc_clocksource(void)
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
-#ifdef CONFIG_X86_64
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
-{
-       int tsc_start, tsc_now;
-       int i, no_ctr_free;
-       unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-       unsigned long flags;
-
-       for (i = 0; i < 4; i++)
-               if (avail_to_resrv_perfctr_nmi_bit(i))
-                       break;
-       no_ctr_free = (i == 4);
-       if (no_ctr_free) {
-               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-                    "cpu_khz value may be incorrect.\n");
-               i = 3;
-               rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               rdmsrl(MSR_K7_PERFCTR3, pmc3);
-       } else {
-               reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-       local_irq_save(flags);
-       /* start measuring cycles, incrementing from 0 */
-       wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-       wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-       rdtscl(tsc_start);
-       do {
-               rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-               tsc_now = get_cycles();
-       } while ((tsc_now - tsc_start) < TICK_COUNT);
-
-       local_irq_restore(flags);
-       if (no_ctr_free) {
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               wrmsrl(MSR_K7_PERFCTR3, pmc3);
-               wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-       } else {
-               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-
-       return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-#else
-static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
-#endif
-
 void __init tsc_init(void)
 {
        u64 lpj;
@@ -964,10 +915,6 @@ void __init tsc_init(void)
                return;
        }
 
-       if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-                       (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-               cpu_khz = calibrate_cpu();
-
        printk("Detected %lu.%03lu MHz processor.\n",
                        (unsigned long)cpu_khz / 1000,
                        (unsigned long)cpu_khz % 1000);
@@ -987,6 +934,9 @@ void __init tsc_init(void)
        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;
 
+       if (!no_sched_irq_time)
+               enable_sched_clock_irqtime();
+
        lpj = ((u64)tsc_khz * 1000);
        do_div(lpj, HZ);
        lpj_fine = lpj;
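
The one-line restore_sched_clock_state() change above is a preemption-balance fix, not a cleanup: get_cpu_var() implies preempt_disable() and must be paired with put_cpu_var(), while this function already runs with interrupts disabled under local_irq_save(). The two correct forms side by side, as a sketch:

            /* preemption-safe form: disables and re-enables preemption */
            get_cpu_var(cyc2ns_offset) = 0;
            put_cpu_var(cyc2ns_offset);

            /* what the fix uses: fine because IRQs are already off here */
            __get_cpu_var(cyc2ns_offset) = 0;
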
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
deleted file mode 100644 (file)
index ce9fbac..0000000
+++ /dev/null
@@ -1,893 +0,0 @@
-/*
- * VMI specific paravirt-ops implementation
- *
- * Copyright (C) 2005, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to zach@vmware.com
- *
- */
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <asm/vmi.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <asm/apic.h>
-#include <asm/pgalloc.h>
-#include <asm/processor.h>
-#include <asm/timer.h>
-#include <asm/vmi_time.h>
-#include <asm/kmap_types.h>
-#include <asm/setup.h>
-
-/* Convenient for calling VMI functions indirectly in the ROM */
-typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
-typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
-
-#define call_vrom_func(rom,func) \
-   (((VROMFUNC *)(rom->func))())
-
-#define call_vrom_long_func(rom,func,arg) \
-   (((VROMLONGFUNC *)(rom->func)) (arg))
-
-static struct vrom_header *vmi_rom;
-static int disable_pge;
-static int disable_pse;
-static int disable_sep;
-static int disable_tsc;
-static int disable_mtrr;
-static int disable_noidle;
-static int disable_vmi_timer;
-
-/* Cached VMI operations */
-static struct {
-       void (*cpuid)(void /* non-c */);
-       void (*_set_ldt)(u32 selector);
-       void (*set_tr)(u32 selector);
-       void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
-       void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
-       void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
-       void (*set_kernel_stack)(u32 selector, u32 sp0);
-       void (*allocate_page)(u32, u32, u32, u32, u32);
-       void (*release_page)(u32, u32);
-       void (*set_pte)(pte_t, pte_t *, unsigned);
-       void (*update_pte)(pte_t *, unsigned);
-       void (*set_linear_mapping)(int, void *, u32, u32);
-       void (*_flush_tlb)(int);
-       void (*set_initial_ap_state)(int, int);
-       void (*halt)(void);
-       void (*set_lazy_mode)(int mode);
-} vmi_ops;
-
-/* Cached VMI operations */
-struct vmi_timer_ops vmi_timer_ops;
-
-/*
- * VMI patching routines.
- */
-#define MNEM_CALL 0xe8
-#define MNEM_JMP  0xe9
-#define MNEM_RET  0xc3
-
-#define IRQ_PATCH_INT_MASK 0
-#define IRQ_PATCH_DISABLE  5
-
-static inline void patch_offset(void *insnbuf,
-                               unsigned long ip, unsigned long dest)
-{
-        *(unsigned long *)(insnbuf+1) = dest-ip-5;
-}
-
-static unsigned patch_internal(int call, unsigned len, void *insnbuf,
-                              unsigned long ip)
-{
-       u64 reloc;
-       struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
-       reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
-       switch(rel->type) {
-               case VMI_RELOCATION_CALL_REL:
-                       BUG_ON(len < 5);
-                       *(char *)insnbuf = MNEM_CALL;
-                       patch_offset(insnbuf, ip, (unsigned long)rel->eip);
-                       return 5;
-
-               case VMI_RELOCATION_JUMP_REL:
-                       BUG_ON(len < 5);
-                       *(char *)insnbuf = MNEM_JMP;
-                       patch_offset(insnbuf, ip, (unsigned long)rel->eip);
-                       return 5;
-
-               case VMI_RELOCATION_NOP:
-                       /* obliterate the whole thing */
-                       return 0;
-
-               case VMI_RELOCATION_NONE:
-                       /* leave native code in place */
-                       break;
-
-               default:
-                       BUG();
-       }
-       return len;
-}
-
-/*
- * Apply patch if appropriate, return length of new instruction
- * sequence.  The callee does nop padding for us.
- */
-static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
-                         unsigned long ip, unsigned len)
-{
-       switch (type) {
-               case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
-                       return patch_internal(VMI_CALL_DisableInterrupts, len,
-                                             insns, ip);
-               case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
-                       return patch_internal(VMI_CALL_EnableInterrupts, len,
-                                             insns, ip);
-               case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
-                       return patch_internal(VMI_CALL_SetInterruptMask, len,
-                                             insns, ip);
-               case PARAVIRT_PATCH(pv_irq_ops.save_fl):
-                       return patch_internal(VMI_CALL_GetInterruptMask, len,
-                                             insns, ip);
-               case PARAVIRT_PATCH(pv_cpu_ops.iret):
-                       return patch_internal(VMI_CALL_IRET, len, insns, ip);
-               case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
-                       return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
-               default:
-                       break;
-       }
-       return len;
-}
-
-/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
-static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
-                               unsigned int *cx, unsigned int *dx)
-{
-       int override = 0;
-       if (*ax == 1)
-               override = 1;
-        asm volatile ("call *%6"
-                      : "=a" (*ax),
-                        "=b" (*bx),
-                        "=c" (*cx),
-                        "=d" (*dx)
-                      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
-       if (override) {
-               if (disable_pse)
-                       *dx &= ~X86_FEATURE_PSE;
-               if (disable_pge)
-                       *dx &= ~X86_FEATURE_PGE;
-               if (disable_sep)
-                       *dx &= ~X86_FEATURE_SEP;
-               if (disable_tsc)
-                       *dx &= ~X86_FEATURE_TSC;
-               if (disable_mtrr)
-                       *dx &= ~X86_FEATURE_MTRR;
-       }
-}
-
-static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
-{
-       if (gdt[nr].a != new->a || gdt[nr].b != new->b)
-               write_gdt_entry(gdt, nr, new, 0);
-}
-
-static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
-{
-       struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-       vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
-       vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
-       vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
-}
-
-static void vmi_set_ldt(const void *addr, unsigned entries)
-{
-       unsigned cpu = smp_processor_id();
-       struct desc_struct desc;
-
-       pack_descriptor(&desc, (unsigned long)addr,
-                       entries * sizeof(struct desc_struct) - 1,
-                       DESC_LDT, 0);
-       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
-       vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
-}
-
-static void vmi_set_tr(void)
-{
-       vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
-}
-
-static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
-{
-       u32 *idt_entry = (u32 *)g;
-       vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
-}
-
-static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
-                               const void *desc, int type)
-{
-       u32 *gdt_entry = (u32 *)desc;
-       vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
-}
-
-static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
-                               const void *desc)
-{
-       u32 *ldt_entry = (u32 *)desc;
-       vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
-}
-
-static void vmi_load_sp0(struct tss_struct *tss,
-                                  struct thread_struct *thread)
-{
-       tss->x86_tss.sp0 = thread->sp0;
-
-       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-       if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
-               tss->x86_tss.ss1 = thread->sysenter_cs;
-               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-       }
-       vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
-}
-
-static void vmi_flush_tlb_user(void)
-{
-       vmi_ops._flush_tlb(VMI_FLUSH_TLB);
-}
-
-static void vmi_flush_tlb_kernel(void)
-{
-       vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
-}
-
-/* Stub to do nothing at all; used for delays and unimplemented calls */
-static void vmi_nop(void)
-{
-}
-
-static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
-{
-       vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
-}
-
-static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
-{
-       /*
-        * This call comes in very early, before mem_map is setup.
-        * It is called only for swapper_pg_dir, which already has
-        * data on it.
-        */
-       vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
-}
-
-static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
-{
-       vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
-}
-
-static void vmi_release_pte(unsigned long pfn)
-{
-       vmi_ops.release_page(pfn, VMI_PAGE_L1);
-}
-
-static void vmi_release_pmd(unsigned long pfn)
-{
-       vmi_ops.release_page(pfn, VMI_PAGE_L2);
-}
-
-/*
- * We use the pgd_free hook for releasing the pgd page:
- */
-static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-       unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;
-
-       vmi_ops.release_page(pfn, VMI_PAGE_L2);
-}
-
-/*
- * Helper macros for MMU update flags.  We can defer updates until a flush
- * or page invalidation only if the update is to the current address space
- * (otherwise, there is no flush).  We must check against init_mm, since
- * this could be a kernel update, which usually passes init_mm, although
- * sometimes this check can be skipped if we know the particular function
- * is only called on user mode PTEs.  We could change the kernel to pass
- * current->active_mm here, but in particular, I was unsure if changing
- * mm/highmem.c to do this would still be correct on other architectures.
- */
-#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
-                                       (!mustbeuser && (mm) == &init_mm))
-#define vmi_flags_addr(mm, addr, level, user)                           \
-        ((level) | (is_current_as(mm, user) ?                           \
-                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
-#define vmi_flags_addr_defer(mm, addr, level, user)                     \
-        ((level) | (is_current_as(mm, user) ?                           \
-                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
-
-static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
-}
-
-static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
-}
-
-static void vmi_set_pte(pte_t *ptep, pte_t pte)
-{
-       /* XXX because of set_pmd_pte, this can be called on PT or PD layers */
-       vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
-}
-
-static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-       vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
-}
-
-static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-#ifdef CONFIG_X86_PAE
-       const pte_t pte = { .pte = pmdval.pmd };
-#else
-       const pte_t pte = { pmdval.pud.pgd.pgd };
-#endif
-       vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
-}
-
-#ifdef CONFIG_X86_PAE
-
-static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
-{
-       /*
-        * XXX This is called from set_pmd_pte, but at both PT
-        * and PD layers so the VMI_PAGE_PT flag is wrong.  But
-        * it is only called for large page mapping changes,
-        * the Xen backend, doesn't support large pages, and the
-        * ESX backend doesn't depend on the flag.
-        */
-       set_64bit((unsigned long long *)ptep,pte_val(pteval));
-       vmi_ops.update_pte(ptep, VMI_PAGE_PT);
-}
-
-static void vmi_set_pud(pud_t *pudp, pud_t pudval)
-{
-       /* Um, eww */
-       const pte_t pte = { .pte = pudval.pgd.pgd };
-       vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
-}
-
-static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       const pte_t pte = { .pte = 0 };
-       vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
-}
-
-static void vmi_pmd_clear(pmd_t *pmd)
-{
-       const pte_t pte = { .pte = 0 };
-       vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
-}
-#endif
-
-#ifdef CONFIG_SMP
-static void __devinit
-vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
-                    unsigned long start_esp)
-{
-       struct vmi_ap_state ap;
-
-       /* Default everything to zero.  This is fine for most GPRs. */
-       memset(&ap, 0, sizeof(struct vmi_ap_state));
-
-       ap.gdtr_limit = GDT_SIZE - 1;
-       ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);
-
-       ap.idtr_limit = IDT_ENTRIES * 8 - 1;
-       ap.idtr_base = (unsigned long) idt_table;
-
-       ap.ldtr = 0;
-
-       ap.cs = __KERNEL_CS;
-       ap.eip = (unsigned long) start_eip;
-       ap.ss = __KERNEL_DS;
-       ap.esp = (unsigned long) start_esp;
-
-       ap.ds = __USER_DS;
-       ap.es = __USER_DS;
-       ap.fs = __KERNEL_PERCPU;
-       ap.gs = __KERNEL_STACK_CANARY;
-
-       ap.eflags = 0;
-
-#ifdef CONFIG_X86_PAE
-       /* efer should match BSP efer. */
-       if (cpu_has_nx) {
-               unsigned l, h;
-               rdmsr(MSR_EFER, l, h);
-               ap.efer = (unsigned long long) h << 32 | l;
-       }
-#endif
-
-       ap.cr3 = __pa(swapper_pg_dir);
-       /* Protected mode, paging, AM, WP, NE, MP. */
-       ap.cr0 = 0x80050023;
-       ap.cr4 = mmu_cr4_features;
-       vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
-}
-#endif
-
-static void vmi_start_context_switch(struct task_struct *prev)
-{
-       paravirt_start_context_switch(prev);
-       vmi_ops.set_lazy_mode(2);
-}
-
-static void vmi_end_context_switch(struct task_struct *next)
-{
-       vmi_ops.set_lazy_mode(0);
-       paravirt_end_context_switch(next);
-}
-
-static void vmi_enter_lazy_mmu(void)
-{
-       paravirt_enter_lazy_mmu();
-       vmi_ops.set_lazy_mode(1);
-}
-
-static void vmi_leave_lazy_mmu(void)
-{
-       vmi_ops.set_lazy_mode(0);
-       paravirt_leave_lazy_mmu();
-}
-
-static inline int __init check_vmi_rom(struct vrom_header *rom)
-{
-       struct pci_header *pci;
-       struct pnp_header *pnp;
-       const char *manufacturer = "UNKNOWN";
-       const char *product = "UNKNOWN";
-       const char *license = "unspecified";
-
-       if (rom->rom_signature != 0xaa55)
-               return 0;
-       if (rom->vrom_signature != VMI_SIGNATURE)
-               return 0;
-       if (rom->api_version_maj != VMI_API_REV_MAJOR ||
-           rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
-               printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
-                               rom->api_version_maj,
-                               rom->api_version_min);
-               return 0;
-       }
-
-       /*
-        * Relying on the VMI_SIGNATURE field is not 100% safe, so check
-        * the PCI header and device type to make sure this is really a
-        * VMI device.
-        */
-       if (!rom->pci_header_offs) {
-               printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
-               return 0;
-       }
-
-       pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
-       if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
-           pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
-               /* Allow it to run... anyways, but warn */
-               printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
-       }
-
-       if (rom->pnp_header_offs) {
-               pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
-               if (pnp->manufacturer_offset)
-                       manufacturer = (const char *)rom+pnp->manufacturer_offset;
-               if (pnp->product_offset)
-                       product = (const char *)rom+pnp->product_offset;
-       }
-
-       if (rom->license_offs)
-               license = (char *)rom+rom->license_offs;
-
-       printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
-               manufacturer, product,
-               rom->api_version_maj, rom->api_version_min,
-               pci->rom_version_maj, pci->rom_version_min);
-
-       /* Don't allow BSD/MIT here for now because we don't want to end up
-          with any binary only shim layers */
-       if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
-               printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
-                       license);
-               return 0;
-       }
-
-       return 1;
-}
-
-/*
- * Probe for the VMI option ROM
- */
-static inline int __init probe_vmi_rom(void)
-{
-       unsigned long base;
-
-       /* VMI ROM is in option ROM area, check signature */
-       for (base = 0xC0000; base < 0xE0000; base += 2048) {
-               struct vrom_header *romstart;
-               romstart = (struct vrom_header *)isa_bus_to_virt(base);
-               if (check_vmi_rom(romstart)) {
-                       vmi_rom = romstart;
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * VMI setup common to all processors
- */
-void vmi_bringup(void)
-{
-       /* We must establish the lowmem mapping for MMU ops to work */
-       if (vmi_ops.set_linear_mapping)
-               vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
-}
-
-/*
- * Return a pointer to a VMI function or NULL if unimplemented
- */
-static void *vmi_get_function(int vmicall)
-{
-       u64 reloc;
-       const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
-       reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
-       BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
-       if (rel->type == VMI_RELOCATION_CALL_REL)
-               return (void *)rel->eip;
-       else
-               return NULL;
-}
-
-/*
- * Helper macro for making the VMI paravirt-ops fill code readable.
- * For unimplemented operations, fall back to default, unless nop
- * is returned by the ROM.
- */
-#define para_fill(opname, vmicall)                             \
-do {                                                           \
-       reloc = call_vrom_long_func(vmi_rom, get_reloc,         \
-                                   VMI_CALL_##vmicall);        \
-       if (rel->type == VMI_RELOCATION_CALL_REL)               \
-               opname = (void *)rel->eip;                      \
-       else if (rel->type == VMI_RELOCATION_NOP)               \
-               opname = (void *)vmi_nop;                       \
-       else if (rel->type != VMI_RELOCATION_NONE)              \
-               printk(KERN_WARNING "VMI: Unknown relocation "  \
-                                   "type %d for " #vmicall"\n",\
-                                       rel->type);             \
-} while (0)
-
-/*
- * Helper macro for making the VMI paravirt-ops fill code readable.
- * For cached operations which do not match the VMI ROM ABI and must
- * go through a translation stub.  Ignore NOPs, since it is not clear
- * that a NOP VMI function corresponds to a NOP paravirt-op when the
- * functions are not in 1-1 correspondence.
- */
-#define para_wrap(opname, wrapper, cache, vmicall)             \
-do {                                                           \
-       reloc = call_vrom_long_func(vmi_rom, get_reloc,         \
-                                   VMI_CALL_##vmicall);        \
-       BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);           \
-       if (rel->type == VMI_RELOCATION_CALL_REL) {             \
-               opname = wrapper;                               \
-               vmi_ops.cache = (void *)rel->eip;               \
-       }                                                       \
-} while (0)
-
-/*
- * Activate the VMI interface and switch into paravirtualized mode
- */
-static inline int __init activate_vmi(void)
-{
-       short kernel_cs;
-       u64 reloc;
-       const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
-
-       /*
-        * Prevent page tables from being allocated in highmem, even if
-        * CONFIG_HIGHPTE is enabled.
-        */
-       __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
-
-       if (call_vrom_func(vmi_rom, vmi_init) != 0) {
-               printk(KERN_ERR "VMI ROM failed to initialize!");
-               return 0;
-       }
-       savesegment(cs, kernel_cs);
-
-       pv_info.paravirt_enabled = 1;
-       pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-       pv_info.name = "vmi [deprecated]";
-
-       pv_init_ops.patch = vmi_patch;
-
-       /*
-        * Many of these operations are ABI compatible with VMI.
-        * This means we can fill in the paravirt-ops with direct
-        * pointers into the VMI ROM.  If the calling convention for
-        * these operations changes, this code needs to be updated.
-        *
-        * Exceptions
-        *  CPUID paravirt-op uses pointers, not the native ISA
-        *  halt has no VMI equivalent; all VMI halts are "safe"
-        *  no MSR support yet - just trap and emulate.  VMI uses the
-        *    same ABI as the native ISA, but Linux wants exceptions
-        *    from bogus MSR read / write handled
-        *  rdpmc is not yet used in Linux
-        */
-
-       /* CPUID is special, so very special it gets wrapped like a present */
-       para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
-
-       para_fill(pv_cpu_ops.clts, CLTS);
-       para_fill(pv_cpu_ops.get_debugreg, GetDR);
-       para_fill(pv_cpu_ops.set_debugreg, SetDR);
-       para_fill(pv_cpu_ops.read_cr0, GetCR0);
-       para_fill(pv_mmu_ops.read_cr2, GetCR2);
-       para_fill(pv_mmu_ops.read_cr3, GetCR3);
-       para_fill(pv_cpu_ops.read_cr4, GetCR4);
-       para_fill(pv_cpu_ops.write_cr0, SetCR0);
-       para_fill(pv_mmu_ops.write_cr2, SetCR2);
-       para_fill(pv_mmu_ops.write_cr3, SetCR3);
-       para_fill(pv_cpu_ops.write_cr4, SetCR4);
-
-       para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
-       para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
-       para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
-       para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
-
-       para_fill(pv_cpu_ops.wbinvd, WBINVD);
-       para_fill(pv_cpu_ops.read_tsc, RDTSC);
-
-       /* The following we emulate with trap and emulate for now */
-       /* paravirt_ops.read_msr = vmi_rdmsr */
-       /* paravirt_ops.write_msr = vmi_wrmsr */
-       /* paravirt_ops.rdpmc = vmi_rdpmc */
-
-       /* TR interface doesn't pass TR value, wrap */
-       para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);
-
-       /* LDT is special, too */
-       para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
-
-       para_fill(pv_cpu_ops.load_gdt, SetGDT);
-       para_fill(pv_cpu_ops.load_idt, SetIDT);
-       para_fill(pv_cpu_ops.store_gdt, GetGDT);
-       para_fill(pv_cpu_ops.store_idt, GetIDT);
-       para_fill(pv_cpu_ops.store_tr, GetTR);
-       pv_cpu_ops.load_tls = vmi_load_tls;
-       para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
-                 write_ldt_entry, WriteLDTEntry);
-       para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
-                 write_gdt_entry, WriteGDTEntry);
-       para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
-                 write_idt_entry, WriteIDTEntry);
-       para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
-       para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
-       para_fill(pv_cpu_ops.io_delay, IODelay);
-
-       para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
-                 set_lazy_mode, SetLazyMode);
-       para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
-                 set_lazy_mode, SetLazyMode);
-
-       para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
-                 set_lazy_mode, SetLazyMode);
-       para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
-                 set_lazy_mode, SetLazyMode);
-
-       /* user and kernel flush are just handled with different flags to FlushTLB */
-       para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
-       para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
-       para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
-
-       /*
-        * Until a standard flag format can be agreed on, we need to
-        * implement these as wrappers in Linux.  Get the VMI ROM
-        * function pointers for the two backend calls.
-        */
-#ifdef CONFIG_X86_PAE
-       vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
-       vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
-#else
-       vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
-       vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
-#endif
-
-       if (vmi_ops.set_pte) {
-               pv_mmu_ops.set_pte = vmi_set_pte;
-               pv_mmu_ops.set_pte_at = vmi_set_pte_at;
-               pv_mmu_ops.set_pmd = vmi_set_pmd;
-#ifdef CONFIG_X86_PAE
-               pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
-               pv_mmu_ops.set_pud = vmi_set_pud;
-               pv_mmu_ops.pte_clear = vmi_pte_clear;
-               pv_mmu_ops.pmd_clear = vmi_pmd_clear;
-#endif
-       }
-
-       if (vmi_ops.update_pte) {
-               pv_mmu_ops.pte_update = vmi_update_pte;
-               pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
-       }
-
-       vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
-       if (vmi_ops.allocate_page) {
-               pv_mmu_ops.alloc_pte = vmi_allocate_pte;
-               pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
-               pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
-       }
-
-       vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
-       if (vmi_ops.release_page) {
-               pv_mmu_ops.release_pte = vmi_release_pte;
-               pv_mmu_ops.release_pmd = vmi_release_pmd;
-               pv_mmu_ops.pgd_free = vmi_pgd_free;
-       }
-
-       /* Set linear is needed in all cases */
-       vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-
-       /*
-        * These MUST always be patched.  Don't support indirect jumps
-        * through these operations, as the VMI interface may use either
-        * a jump or a call to get to these operations, depending on
-        * the backend.  They are performance critical anyway, so requiring
-        * a patch is not a big problem.
-        */
-       pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
-       pv_cpu_ops.iret = (void *)0xbadbab0;
-
-#ifdef CONFIG_SMP
-       para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
-       para_fill(apic->read, APICRead);
-       para_fill(apic->write, APICWrite);
-#endif
-
-       /*
-        * Check for VMI timer functionality by probing for a cycle frequency method
-        */
-       reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
-       if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
-               vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
-               vmi_timer_ops.get_cycle_counter =
-                       vmi_get_function(VMI_CALL_GetCycleCounter);
-               vmi_timer_ops.get_wallclock =
-                       vmi_get_function(VMI_CALL_GetWallclockTime);
-               vmi_timer_ops.wallclock_updated =
-                       vmi_get_function(VMI_CALL_WallclockUpdated);
-               vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
-               vmi_timer_ops.cancel_alarm =
-                        vmi_get_function(VMI_CALL_CancelAlarm);
-               x86_init.timers.timer_init = vmi_time_init;
-#ifdef CONFIG_X86_LOCAL_APIC
-               x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
-               x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
-#endif
-               pv_time_ops.sched_clock = vmi_sched_clock;
-               x86_platform.calibrate_tsc = vmi_tsc_khz;
-               x86_platform.get_wallclock = vmi_get_wallclock;
-               x86_platform.set_wallclock = vmi_set_wallclock;
-
-               /* We have true wallclock functions; disable CMOS clock sync */
-               no_sync_cmos_clock = 1;
-       } else {
-               disable_noidle = 1;
-               disable_vmi_timer = 1;
-       }
-
-       para_fill(pv_irq_ops.safe_halt, Halt);
-
-       /*
-        * Alternative instruction rewriting doesn't happen soon enough
-        * to convert VMI_IRET to a call instead of a jump, so we have
-        * to do this before IRQs get re-enabled.  Fortunately, it is
-        * idempotent.
-        */
-       apply_paravirt(__parainstructions, __parainstructions_end);
-
-       vmi_bringup();
-
-       return 1;
-}
-
-#undef para_fill
-
-void __init vmi_init(void)
-{
-       if (!vmi_rom)
-               probe_vmi_rom();
-       else
-               check_vmi_rom(vmi_rom);
-
-       /* In case probing for or validating the ROM failed, bail */
-       if (!vmi_rom)
-               return;
-
-       reserve_top_address(-vmi_rom->virtual_top);
-
-#ifdef CONFIG_X86_IO_APIC
-       /* This is virtual hardware; timer routing is wired correctly */
-       no_timer_check = 1;
-#endif
-}
-
-void __init vmi_activate(void)
-{
-       unsigned long flags;
-
-       if (!vmi_rom)
-               return;
-
-       local_irq_save(flags);
-       activate_vmi();
-       local_irq_restore(flags & X86_EFLAGS_IF);
-}
-
-static int __init parse_vmi(char *arg)
-{
-       if (!arg)
-               return -EINVAL;
-
-       if (!strcmp(arg, "disable_pge")) {
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
-               disable_pge = 1;
-       } else if (!strcmp(arg, "disable_pse")) {
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
-               disable_pse = 1;
-       } else if (!strcmp(arg, "disable_sep")) {
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
-               disable_sep = 1;
-       } else if (!strcmp(arg, "disable_tsc")) {
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
-               disable_tsc = 1;
-       } else if (!strcmp(arg, "disable_mtrr")) {
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
-               disable_mtrr = 1;
-       } else if (!strcmp(arg, "disable_timer")) {
-               disable_vmi_timer = 1;
-               disable_noidle = 1;
-       } else if (!strcmp(arg, "disable_noidle"))
-               disable_noidle = 1;
-       return 0;
-}
-
-early_param("vmi", parse_vmi);
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
deleted file mode 100644 (file)
index 5e1ff66..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * VMI paravirtual timer support routines.
- *
- * Copyright (C) 2007, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/cpumask.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-
-#include <asm/vmi.h>
-#include <asm/vmi_time.h>
-#include <asm/apicdef.h>
-#include <asm/apic.h>
-#include <asm/timer.h>
-#include <asm/i8253.h>
-#include <asm/irq_vectors.h>
-
-#define VMI_ONESHOT  (VMI_ALARM_IS_ONESHOT  | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
-#define VMI_PERIODIC (VMI_ALARM_IS_PERIODIC | VMI_CYCLES_REAL | vmi_get_alarm_wiring())
-
-static DEFINE_PER_CPU(struct clock_event_device, local_events);
-
-static inline u32 vmi_counter(u32 flags)
-{
-       /* Given VMI_ONESHOT or VMI_PERIODIC, return the corresponding
-        * cycle counter. */
-       return flags & VMI_ALARM_COUNTER_MASK;
-}
-
-/* paravirt_ops.get_wallclock = vmi_get_wallclock */
-unsigned long vmi_get_wallclock(void)
-{
-       unsigned long long wallclock;
-       wallclock = vmi_timer_ops.get_wallclock(); // nsec
-       (void)do_div(wallclock, 1000000000);       // sec
-
-       return wallclock;
-}
-
-/* paravirt_ops.set_wallclock = vmi_set_wallclock */
-int vmi_set_wallclock(unsigned long now)
-{
-       return 0;
-}
-
-/* paravirt_ops.sched_clock = vmi_sched_clock */
-unsigned long long vmi_sched_clock(void)
-{
-       return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
-}
-
-/* x86_platform.calibrate_tsc = vmi_tsc_khz */
-unsigned long vmi_tsc_khz(void)
-{
-       unsigned long long khz;
-       khz = vmi_timer_ops.get_cycle_frequency();
-       (void)do_div(khz, 1000);
-       return khz;
-}
-
-static inline unsigned int vmi_get_timer_vector(void)
-{
-       return IRQ0_VECTOR;
-}
-
-/** vmi clockchip */
-#ifdef CONFIG_X86_LOCAL_APIC
-static unsigned int startup_timer_irq(unsigned int irq)
-{
-       unsigned long val = apic_read(APIC_LVTT);
-       apic_write(APIC_LVTT, vmi_get_timer_vector());
-
-       return (val & APIC_SEND_PENDING);
-}
-
-static void mask_timer_irq(unsigned int irq)
-{
-       unsigned long val = apic_read(APIC_LVTT);
-       apic_write(APIC_LVTT, val | APIC_LVT_MASKED);
-}
-
-static void unmask_timer_irq(unsigned int irq)
-{
-       unsigned long val = apic_read(APIC_LVTT);
-       apic_write(APIC_LVTT, val & ~APIC_LVT_MASKED);
-}
-
-static void ack_timer_irq(unsigned int irq)
-{
-       ack_APIC_irq();
-}
-
-static struct irq_chip vmi_chip __read_mostly = {
-       .name           = "VMI-LOCAL",
-       .startup        = startup_timer_irq,
-       .mask           = mask_timer_irq,
-       .unmask         = unmask_timer_irq,
-       .ack            = ack_timer_irq
-};
-#endif
-
-/** vmi clockevent */
-#define VMI_ALARM_WIRED_IRQ0    0x00000000
-#define VMI_ALARM_WIRED_LVTT    0x00010000
-static int vmi_wiring = VMI_ALARM_WIRED_IRQ0;
-
-static inline int vmi_get_alarm_wiring(void)
-{
-       return vmi_wiring;
-}
-
-static void vmi_timer_set_mode(enum clock_event_mode mode,
-                              struct clock_event_device *evt)
-{
-       cycle_t now, cycles_per_hz;
-       BUG_ON(!irqs_disabled());
-
-       switch (mode) {
-       case CLOCK_EVT_MODE_ONESHOT:
-       case CLOCK_EVT_MODE_RESUME:
-               break;
-       case CLOCK_EVT_MODE_PERIODIC:
-               cycles_per_hz = vmi_timer_ops.get_cycle_frequency();
-               (void)do_div(cycles_per_hz, HZ);
-               now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_PERIODIC));
-               vmi_timer_ops.set_alarm(VMI_PERIODIC, now, cycles_per_hz);
-               break;
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               switch (evt->mode) {
-               case CLOCK_EVT_MODE_ONESHOT:
-                       vmi_timer_ops.cancel_alarm(VMI_ONESHOT);
-                       break;
-               case CLOCK_EVT_MODE_PERIODIC:
-                       vmi_timer_ops.cancel_alarm(VMI_PERIODIC);
-                       break;
-               default:
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-}
-
-static int vmi_timer_next_event(unsigned long delta,
-                               struct clock_event_device *evt)
-{
-       /* Unfortunately, set_next_event interface only passes relative
-        * expiry, but we want absolute expiry.  It'd be better if we
-        * were passed an absolute expiry, since a bunch of time may
-        * have been stolen between the time the delta is computed and
-        * when we set the alarm below. */
-       cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
-
-       BUG_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
-       vmi_timer_ops.set_alarm(VMI_ONESHOT, now + delta, 0);
-       return 0;
-}
-
-static struct clock_event_device vmi_clockevent = {
-       .name           = "vmi-timer",
-       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-       .shift          = 22,
-       .set_mode       = vmi_timer_set_mode,
-       .set_next_event = vmi_timer_next_event,
-       .rating         = 1000,
-       .irq            = 0,
-};
-
-static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
-{
-       struct clock_event_device *evt = &__get_cpu_var(local_events);
-       evt->event_handler(evt);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction vmi_clock_action  = {
-       .name           = "vmi-timer",
-       .handler        = vmi_timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
-};
-
-static void __devinit vmi_time_init_clockevent(void)
-{
-       cycle_t cycles_per_msec;
-       struct clock_event_device *evt;
-
-       int cpu = smp_processor_id();
-       evt = &__get_cpu_var(local_events);
-
-       /* Use cycles_per_msec since div_sc params are 32-bits. */
-       cycles_per_msec = vmi_timer_ops.get_cycle_frequency();
-       (void)do_div(cycles_per_msec, 1000);
-
-       memcpy(evt, &vmi_clockevent, sizeof(*evt));
-       /* Must pick .shift such that .mult fits in 32 bits.  Choosing
-        * .shift to be 22 allows 2^(32-22) cycles per nanosecond
-        * before overflow. */
-       evt->mult = div_sc(cycles_per_msec, NSEC_PER_MSEC, evt->shift);
-       /* Upper bound is clockevent's use of ulong for cycle deltas. */
-       evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt);
-       evt->min_delta_ns = clockevent_delta2ns(1, evt);
-       evt->cpumask = cpumask_of(cpu);
-
-       printk(KERN_WARNING "vmi: registering clock event %s. mult=%u shift=%u\n",
-              evt->name, evt->mult, evt->shift);
-       clockevents_register_device(evt);
-}
-
-void __init vmi_time_init(void)
-{
-       unsigned int cpu;
-       /* Disable PIT: BIOSes start PIT CH0 in 18.2Hz periodic mode. */
-       outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
-
-       vmi_time_init_clockevent();
-       setup_irq(0, &vmi_clock_action);
-       for_each_possible_cpu(cpu)
-               per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
-}
-
-#ifdef CONFIG_X86_LOCAL_APIC
-void __devinit vmi_time_bsp_init(void)
-{
-       /*
-        * On APIC systems, we want local timers to fire on each cpu.  We do
-        * this by programming LVTT to deliver timer events to the IRQ handler
-        * for IRQ-0, since we can't re-use the APIC local timer handler
-        * without interfering with that code.
-        */
-       clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
-       local_irq_disable();
-#ifdef CONFIG_SMP
-       /*
-        * XXX handle_percpu_irq only defined for SMP; we need to switch over
-        * to using it, since this is a local interrupt, which each CPU must
-        * handle individually without locking out or dropping simultaneous
-        * local timers on other CPUs.  We also don't want to trigger the
-        * quirk workaround code for interrupts which gets invoked from
-        * handle_percpu_irq via eoi, so we use our own IRQ chip.
-        */
-       set_irq_chip_and_handler_name(0, &vmi_chip, handle_percpu_irq, "lvtt");
-#else
-       set_irq_chip_and_handler_name(0, &vmi_chip, handle_edge_irq, "lvtt");
-#endif
-       vmi_wiring = VMI_ALARM_WIRED_LVTT;
-       apic_write(APIC_LVTT, vmi_get_timer_vector());
-       local_irq_enable();
-       clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
-}
-
-void __devinit vmi_time_ap_init(void)
-{
-       vmi_time_init_clockevent();
-       apic_write(APIC_LVTT, vmi_get_timer_vector());
-}
-#endif
-
-/** vmi clocksource */
-static struct clocksource clocksource_vmi;
-
-static cycle_t read_real_cycles(struct clocksource *cs)
-{
-       cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
-       return max(ret, clocksource_vmi.cycle_last);
-}
-
-static struct clocksource clocksource_vmi = {
-       .name                   = "vmi-timer",
-       .rating                 = 450,
-       .read                   = read_real_cycles,
-       .mask                   = CLOCKSOURCE_MASK(64),
-       .mult                   = 0, /* to be set */
-       .shift                  = 22,
-       .flags                  = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_vmi_clocksource(void)
-{
-       cycle_t cycles_per_msec;
-
-       if (!vmi_timer_ops.get_cycle_frequency)
-               return 0;
-       /* Use khz2mult rather than hz2mult since hz arg is only 32-bits. */
-       cycles_per_msec = vmi_timer_ops.get_cycle_frequency();
-       (void)do_div(cycles_per_msec, 1000);
-
-       /* Note that clocksource.{mult, shift} converts in the opposite direction
-        * as clockevents.  */
-       clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec,
-                                                   clocksource_vmi.shift);
-
-       printk(KERN_WARNING "vmi: registering clock source khz=%lld\n", cycles_per_msec);
-       return clocksource_register(&clocksource_vmi);
-
-}
-module_init(init_vmi_clocksource);
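
The mult/shift pairs registered throughout the deleted file convert between cycles and nanoseconds in fixed point: ns = (cycles * mult) >> shift. A small standalone sketch of that arithmetic follows; the counter frequency and the helper name are hypothetical, and this is not the kernel's clocksource_khz2mult():

#include <stdio.h>
#include <stdint.h>

/* Pick mult so that (cycles * mult) >> shift yields nanoseconds,
 * given a counter frequency in kHz. Illustration only. */
static uint32_t compute_mult(uint64_t khz, uint32_t shift)
{
	/* ns per cycle = 10^6 / khz, scaled by 2^shift */
	return (uint32_t)(((uint64_t)1000000 << shift) / khz);
}

int main(void)
{
	uint32_t shift = 22;
	uint64_t khz = 2000000;		/* hypothetical 2 GHz counter */
	uint32_t mult = compute_mult(khz, shift);
	uint64_t cycles = 4000000;	/* 2 ms worth of cycles */

	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((cycles * (uint64_t)mult) >> shift));
	return 0;
}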
index b38bd8b92aa6c84ed9a00295eeaac73bade26458..66ca98aafdd6a73d7eea8834d1f1c930384b8268 100644 (file)
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       u64 old = c->dst.orig_val;
+       u64 old = c->dst.orig_val64;
 
        if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
            ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
                c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
                c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
                ctxt->eflags &= ~EFLG_ZF;
        } else {
-               c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
-                      (u32) c->regs[VCPU_REGS_RBX];
+               c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+                       (u32) c->regs[VCPU_REGS_RBX];
 
                ctxt->eflags |= EFLG_ZF;
        }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                                        c->src.valptr, c->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
-               c->src.orig_val = c->src.val;
+               c->src.orig_val64 = c->src.val64;
        }
 
        if (c->src2.type == OP_MEM) {
index 8d10c063d7f207451b087a11a8d7bf0c888f3695..4b7b73ce209894442eddd9df2b8f076408389b12 100644 (file)
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
                if (!found)
                        found = s->kvm->bsp_vcpu;
 
+               if (!found)
+                       return;
+
                kvm_vcpu_kick(found);
        }
 }
index ffed06871c5cf389594244086ecb3685496b4258..63c314502993b33c1a53773d111eb69133668179 100644 (file)
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
        u8 irr;         /* interrupt request register */
        u8 imr;         /* interrupt mask register */
        u8 isr;         /* interrupt service register */
-       u8 isr_ack;     /* interrupt ack detection */
        u8 priority_add;        /* highest irq priority */
        u8 irq_base;
        u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
        u8 init4;               /* true if 4 byte init */
        u8 elcr;                /* PIIX edge/trigger selection */
        u8 elcr_mask;
+       u8 isr_ack;     /* interrupt ack detection */
        struct kvm_pic *pics_state;
 };
 
index 77d8c0f4817d5f10f88e725ad49b22642c1172c5..22b06f7660f4f44459ef62568659f745d2d8dc83 100644 (file)
@@ -1056,14 +1056,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 
        vcpu->arch.apic = apic;
 
-       apic->regs_page = alloc_page(GFP_KERNEL);
+       apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (apic->regs_page == NULL) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->regs = page_address(apic->regs_page);
-       memset(apic->regs, 0, PAGE_SIZE);
        apic->vcpu = vcpu;
 
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
index bc5b9b8d4a33117259882835bfb884f4f8f37656..8a3f9f64f86f9e7fee5bc5112bf50a04fbe37b15 100644 (file)
@@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
-       control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;
 
        init_seg(&save->es);
@@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);
+       svm->vmcb->control.tsc_offset = 0-native_read_tsc();
 
        err = fx_init(&svm->vcpu);
        if (err)
@@ -3163,8 +3163,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        sync_lapic_to_cr8(vcpu);
 
        save_host_msrs(vcpu);
-       fs_selector = kvm_read_fs();
-       gs_selector = kvm_read_gs();
+       savesegment(fs, fs_selector);
+       savesegment(gs, gs_selector);
        ldt_selector = kvm_read_ldt();
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
        /* required for live migration with NPT */
@@ -3251,10 +3251,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-       kvm_load_fs(fs_selector);
-       kvm_load_gs(gs_selector);
-       kvm_load_ldt(ldt_selector);
        load_host_msrs(vcpu);
+       loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+       load_gs_index(gs_selector);
+       wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+       loadsegment(gs, gs_selector);
+#endif
+       kvm_load_ldt(ldt_selector);
 
        reload_tss(vcpu);
 
index 49b25eee25acc075538a411fc24c23a326f02fd4..7bddfab120139435d1d10885520cc5293fefcc07 100644 (file)
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
         */
        vmx->host_state.ldt_sel = kvm_read_ldt();
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-       vmx->host_state.fs_sel = kvm_read_fs();
+       savesegment(fs, vmx->host_state.fs_sel);
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
                vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
                vmcs_write16(HOST_FS_SELECTOR, 0);
                vmx->host_state.fs_reload_needed = 1;
        }
-       vmx->host_state.gs_sel = kvm_read_gs();
+       savesegment(gs, vmx->host_state.gs_sel);
        if (!(vmx->host_state.gs_sel & 7))
                vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
        else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-       unsigned long flags;
-
        if (!vmx->host_state.loaded)
                return;
 
        ++vmx->vcpu.stat.host_state_reload;
        vmx->host_state.loaded = 0;
        if (vmx->host_state.fs_reload_needed)
-               kvm_load_fs(vmx->host_state.fs_sel);
+               loadsegment(fs, vmx->host_state.fs_sel);
        if (vmx->host_state.gs_ldt_reload_needed) {
                kvm_load_ldt(vmx->host_state.ldt_sel);
-               /*
-                * If we have to reload gs, we must take care to
-                * preserve our gs base.
-                */
-               local_irq_save(flags);
-               kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-               wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+               load_gs_index(vmx->host_state.gs_sel);
+               wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+               loadsegment(gs, vmx->host_state.gs_sel);
 #endif
-               local_irq_restore(flags);
        }
        reload_tss();
 #ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-       vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
-       vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
+       vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
+       vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
index 3a09c625d5268ad4dc0f7a033d429c234babcb96..6c2ecf0a806d67040335f7c0c57c0b57fed3384d 100644 (file)
@@ -1991,13 +1991,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
                0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-               0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
+               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
-               0 /* SKINIT */ | 0 /* WDT */;
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
index 9257510b4836837eb4cdff719000b716ff3bdd63..9d5f5584845587acbf5f544d7405e260a5e16b95 100644 (file)
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
 }
 
 /*
- * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
- * then tell the Host to reload the entire thing.  This operation is so rare
- * that this naive implementation is reasonable.
+ * For a single GDT entry which changes, we simply change our copy and
+ * then tell the host about it.
  */
 static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
                                   const void *desc, int type)
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 }
 
 /*
- * OK, I lied.  There are three "thread local storage" GDT entries which change
+ * There are three "thread local storage" GDT entries which change
  * on every context switch (these three entries are how glibc implements
- * __thread variables).  So we have a hypercall specifically for this case.
+ * __thread variables).  As an optimization, we have a hypercall
+ * specifically for this case.
+ *
+ * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
+ * which took a range of entries?
  */
 static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
 {
index 5415a9d06f53b75c4a993b0bbe96508502691142..b908a59eccf52fe4ab22382ae793cb2a9f023e4f 100644 (file)
@@ -22,22 +22,187 @@ EXPORT_SYMBOL(memset);
 
 void *memmove(void *dest, const void *src, size_t n)
 {
-       int d0, d1, d2;
-
-       if (dest < src) {
-               memcpy(dest, src, n);
-       } else {
-               __asm__ __volatile__(
-                       "std\n\t"
-                       "rep\n\t"
-                       "movsb\n\t"
-                       "cld"
-                       : "=&c" (d0), "=&S" (d1), "=&D" (d2)
-                       :"0" (n),
-                        "1" (n-1+src),
-                        "2" (n-1+dest)
-                       :"memory");
-       }
-       return dest;
+       int d0,d1,d2,d3,d4,d5;
+       char *ret = dest;
+
+       __asm__ __volatile__(
+               /* Handle sizes of 16 bytes or more in the loop */
+               "cmp $0x10, %0\n\t"
+               "jb     1f\n\t"
+
+               /* Decide forward/backward copy mode */
+               "cmp %2, %1\n\t"
+               "jb     2f\n\t"
+
+               /*
+                * The movs instruction has a high startup latency,
+                * so we handle small sizes with general registers.
+                */
+               "cmp  $680, %0\n\t"
+               "jb 3f\n\t"
+               /*
+                * The movs instruction is only good for the aligned case.
+                */
+               "mov %1, %3\n\t"
+               "xor %2, %3\n\t"
+               "and $0xff, %3\n\t"
+               "jz 4f\n\t"
+               "3:\n\t"
+               "sub $0x10, %0\n\t"
+
+               /*
+                * We gobble 16 bytes forward in each loop.
+                */
+               "3:\n\t"
+               "sub $0x10, %0\n\t"
+               "mov 0*4(%1), %3\n\t"
+               "mov 1*4(%1), %4\n\t"
+               "mov  %3, 0*4(%2)\n\t"
+               "mov  %4, 1*4(%2)\n\t"
+               "mov 2*4(%1), %3\n\t"
+               "mov 3*4(%1), %4\n\t"
+               "mov  %3, 2*4(%2)\n\t"
+               "mov  %4, 3*4(%2)\n\t"
+               "lea  0x10(%1), %1\n\t"
+               "lea  0x10(%2), %2\n\t"
+               "jae 3b\n\t"
+               "add $0x10, %0\n\t"
+               "jmp 1f\n\t"
+
+               /*
+                * Handle data forward by movs.
+                */
+               ".p2align 4\n\t"
+               "4:\n\t"
+               "mov -4(%1, %0), %3\n\t"
+               "lea -4(%2, %0), %4\n\t"
+               "shr $2, %0\n\t"
+               "rep movsl\n\t"
+               "mov %3, (%4)\n\t"
+               "jmp 11f\n\t"
+               /*
+                * Handle data backward by movs.
+                */
+               ".p2align 4\n\t"
+               "6:\n\t"
+               "mov (%1), %3\n\t"
+               "mov %2, %4\n\t"
+               "lea -4(%1, %0), %1\n\t"
+               "lea -4(%2, %0), %2\n\t"
+               "shr $2, %0\n\t"
+               "std\n\t"
+               "rep movsl\n\t"
+               "mov %3,(%4)\n\t"
+               "cld\n\t"
+               "jmp 11f\n\t"
+
+               /*
+                * Start to prepare for backward copy.
+                */
+               ".p2align 4\n\t"
+               "2:\n\t"
+               "cmp  $680, %0\n\t"
+               "jb 5f\n\t"
+               "mov %1, %3\n\t"
+               "xor %2, %3\n\t"
+               "and $0xff, %3\n\t"
+               "jz 6b\n\t"
+
+               /*
+                * Calculate copy position to tail.
+                */
+               "5:\n\t"
+               "add %0, %1\n\t"
+               "add %0, %2\n\t"
+               "sub $0x10, %0\n\t"
+
+               /*
+                * We gobble 16 bytes backward in each loop.
+                */
+               "7:\n\t"
+               "sub $0x10, %0\n\t"
+
+               "mov -1*4(%1), %3\n\t"
+               "mov -2*4(%1), %4\n\t"
+               "mov  %3, -1*4(%2)\n\t"
+               "mov  %4, -2*4(%2)\n\t"
+               "mov -3*4(%1), %3\n\t"
+               "mov -4*4(%1), %4\n\t"
+               "mov  %3, -3*4(%2)\n\t"
+               "mov  %4, -4*4(%2)\n\t"
+               "lea  -0x10(%1), %1\n\t"
+               "lea  -0x10(%2), %2\n\t"
+               "jae 7b\n\t"
+               /*
+                * Calculate copy position to head.
+                */
+               "add $0x10, %0\n\t"
+               "sub %0, %1\n\t"
+               "sub %0, %2\n\t"
+
+               /*
+                * Move data from 8 bytes to 15 bytes.
+                */
+               ".p2align 4\n\t"
+               "1:\n\t"
+               "cmp $8, %0\n\t"
+               "jb 8f\n\t"
+               "mov 0*4(%1), %3\n\t"
+               "mov 1*4(%1), %4\n\t"
+               "mov -2*4(%1, %0), %5\n\t"
+               "mov -1*4(%1, %0), %1\n\t"
+
+               "mov  %3, 0*4(%2)\n\t"
+               "mov  %4, 1*4(%2)\n\t"
+               "mov  %5, -2*4(%2, %0)\n\t"
+               "mov  %1, -1*4(%2, %0)\n\t"
+               "jmp 11f\n\t"
+
+               /*
+                * Move data from 4 bytes to 7 bytes.
+                */
+               ".p2align 4\n\t"
+               "8:\n\t"
+               "cmp $4, %0\n\t"
+               "jb 9f\n\t"
+               "mov 0*4(%1), %3\n\t"
+               "mov -1*4(%1, %0), %4\n\t"
+               "mov  %3, 0*4(%2)\n\t"
+               "mov  %4, -1*4(%2, %0)\n\t"
+               "jmp 11f\n\t"
+
+               /*
+                * Move data from 2 bytes to 3 bytes.
+                */
+               ".p2align 4\n\t"
+               "9:\n\t"
+               "cmp $2, %0\n\t"
+               "jb 10f\n\t"
+               "movw 0*2(%1), %%dx\n\t"
+               "movw -1*2(%1, %0), %%bx\n\t"
+               "movw %%dx, 0*2(%2)\n\t"
+               "movw %%bx, -1*2(%2, %0)\n\t"
+               "jmp 11f\n\t"
+
+               /*
+                * Move data for 1 byte.
+                */
+               ".p2align 4\n\t"
+               "10:\n\t"
+               "cmp $1, %0\n\t"
+               "jb 11f\n\t"
+               "movb (%1), %%cl\n\t"
+               "movb %%cl, (%2)\n\t"
+               ".p2align 4\n\t"
+               "11:"
+               : "=&c" (d0), "=&S" (d1), "=&D" (d2),
+                 "=r" (d3),"=r" (d4), "=r"(d5)
+               :"0" (n),
+                "1" (src),
+                "2" (dest)
+               :"memory");
+
+       return ret;
+
 }
 EXPORT_SYMBOL(memmove);
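
The rewritten 32-bit memmove above keeps the classic direction rule — copy forward when the destination lies below the source, backward otherwise, so overlapping ranges stay correct — and layers its fast paths on top. A byte-at-a-time C sketch of just that rule (a plain illustration, not the optimized routine):

#include <stddef.h>
#include <stdio.h>

/* Minimal sketch of the overlap-safe copy policy. */
static void *my_memmove(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d < s) {
		while (n--)		/* forward copy */
			*d++ = *s++;
	} else {
		d += n;
		s += n;
		while (n--)		/* backward copy */
			*--d = *--s;
	}
	return dest;
}

int main(void)
{
	char buf[16] = "abcdefgh";

	my_memmove(buf + 2, buf, 6);	/* overlapping, dest > src */
	printf("%s\n", buf);		/* prints "ababcdef" */
	return 0;
}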
index bcbcd1e0f7d57fe4b3972adc24785dc6837386f6..75ef61e35e38aee1cf62b05a48f1add62b148f98 100644 (file)
 ENTRY(__memcpy)
 ENTRY(memcpy)
        CFI_STARTPROC
+       movq %rdi, %rax
 
        /*
-        * Put the number of full 64-byte blocks into %ecx.
-        * Tail portion is handled at the end:
+        * Use a 32-bit CMP here to avoid long NOP padding.
         */
-       movq %rdi, %rax
-       movl %edx, %ecx
-       shrl   $6, %ecx
-       jz .Lhandle_tail
+       cmp  $0x20, %edx
+       jb .Lhandle_tail
 
-       .p2align 4
-.Lloop_64:
        /*
-        * We decrement the loop index here - and the zero-flag is
-        * checked at the end of the loop (instructions inbetween do
-        * not change the zero flag):
+        * We check whether a memory false dependence could occur,
+        * then jump to the corresponding copy mode.
         */
-       decl %ecx
+       cmp  %dil, %sil
+       jl .Lcopy_backward
+       subl $0x20, %edx
+.Lcopy_forward_loop:
+       subq $0x20,     %rdx
 
        /*
-        * Move in blocks of 4x16 bytes:
+        * Move in blocks of 4x8 bytes:
         */
-       movq 0*8(%rsi),         %r11
-       movq 1*8(%rsi),         %r8
-       movq %r11,              0*8(%rdi)
-       movq %r8,               1*8(%rdi)
-
-       movq 2*8(%rsi),         %r9
-       movq 3*8(%rsi),         %r10
-       movq %r9,               2*8(%rdi)
-       movq %r10,              3*8(%rdi)
-
-       movq 4*8(%rsi),         %r11
-       movq 5*8(%rsi),         %r8
-       movq %r11,              4*8(%rdi)
-       movq %r8,               5*8(%rdi)
-
-       movq 6*8(%rsi),         %r9
-       movq 7*8(%rsi),         %r10
-       movq %r9,               6*8(%rdi)
-       movq %r10,              7*8(%rdi)
-
-       leaq 64(%rsi), %rsi
-       leaq 64(%rdi), %rdi
-
-       jnz  .Lloop_64
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+       movq 2*8(%rsi), %r10
+       movq 3*8(%rsi), %r11
+       leaq 4*8(%rsi), %rsi
+
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+       movq %r10,      2*8(%rdi)
+       movq %r11,      3*8(%rdi)
+       leaq 4*8(%rdi), %rdi
+       jae  .Lcopy_forward_loop
+       addq $0x20,     %rdx
+       jmp  .Lhandle_tail
+
+.Lcopy_backward:
+       /*
+        * Calculate copy position to tail.
+        */
+       addq %rdx,      %rsi
+       addq %rdx,      %rdi
+       subq $0x20,     %rdx
+       /*
+        * At most 3 ALU operations issue in one cycle,
+        * so pad with NOPs within the same 16-byte chunk.
+        */
+       .p2align 4
+.Lcopy_backward_loop:
+       subq $0x20,     %rdx
+       movq -1*8(%rsi),        %r8
+       movq -2*8(%rsi),        %r9
+       movq -3*8(%rsi),        %r10
+       movq -4*8(%rsi),        %r11
+       leaq -4*8(%rsi),        %rsi
+       movq %r8,               -1*8(%rdi)
+       movq %r9,               -2*8(%rdi)
+       movq %r10,              -3*8(%rdi)
+       movq %r11,              -4*8(%rdi)
+       leaq -4*8(%rdi),        %rdi
+       jae  .Lcopy_backward_loop
 
+       /*
+        * Calculate copy position to head.
+        */
+       addq $0x20,     %rdx
+       subq %rdx,      %rsi
+       subq %rdx,      %rdi
 .Lhandle_tail:
-       movl %edx, %ecx
-       andl  $63, %ecx
-       shrl   $3, %ecx
-       jz   .Lhandle_7
+       cmpq $16,       %rdx
+       jb   .Lless_16bytes
 
+       /*
+        * Move data from 16 bytes to 31 bytes.
+        */
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+       movq -2*8(%rsi, %rdx),  %r10
+       movq -1*8(%rsi, %rdx),  %r11
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+       movq %r10,      -2*8(%rdi, %rdx)
+       movq %r11,      -1*8(%rdi, %rdx)
+       retq
        .p2align 4
-.Lloop_8:
-       decl %ecx
-       movq (%rsi),            %r8
-       movq %r8,               (%rdi)
-       leaq 8(%rdi),           %rdi
-       leaq 8(%rsi),           %rsi
-       jnz  .Lloop_8
-
-.Lhandle_7:
-       movl %edx, %ecx
-       andl $7, %ecx
-       jz .Lend
+.Lless_16bytes:
+       cmpq $8,        %rdx
+       jb   .Lless_8bytes
+       /*
+        * Move data from 8 bytes to 15 bytes.
+        */
+       movq 0*8(%rsi), %r8
+       movq -1*8(%rsi, %rdx),  %r9
+       movq %r8,       0*8(%rdi)
+       movq %r9,       -1*8(%rdi, %rdx)
+       retq
+       .p2align 4
+.Lless_8bytes:
+       cmpq $4,        %rdx
+       jb   .Lless_3bytes
 
+       /*
+        * Move data from 4 bytes to 7 bytes.
+        */
+       movl (%rsi), %ecx
+       movl -4(%rsi, %rdx), %r8d
+       movl %ecx, (%rdi)
+       movl %r8d, -4(%rdi, %rdx)
+       retq
        .p2align 4
+.Lless_3bytes:
+       cmpl $0, %edx
+       je .Lend
+       /*
+        * Move data from 1 byte to 3 bytes.
+        */
 .Lloop_1:
        movb (%rsi), %r8b
        movb %r8b, (%rdi)
        incq %rdi
        incq %rsi
-       decl %ecx
+       decl %edx
        jnz .Lloop_1
 
 .Lend:
-       ret
+       retq
        CFI_ENDPROC
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
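
The tail handling above avoids a byte loop for 8..15-byte remainders by loading the first and last 8 bytes of the range and storing both, letting the two stores overlap in the middle. A portable sketch of the same trick — the helper name is invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy n bytes, 8 <= n <= 15, with exactly two 8-byte loads and
 * stores; the second pair is anchored at the end of the range, so
 * the middle bytes may be written twice. */
static void copy_8_to_15(void *dst, const void *src, size_t n)
{
	uint64_t head, tail;

	memcpy(&head, src, 8);
	memcpy(&tail, (const char *)src + n - 8, 8);
	memcpy(dst, &head, 8);
	memcpy((char *)dst + n - 8, &tail, 8);
}

int main(void)
{
	char dst[16] = {0};

	copy_8_to_15(dst, "0123456789ABCD", 14);
	printf("%.14s\n", dst);		/* 0123456789ABCD */
	return 0;
}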
index 0a33909bf12213dbb0945d057e5c7c537296074e..6d0f0ec41b348ad2a912fdb184303c094ce1a996 100644 (file)
 #undef memmove
 void *memmove(void *dest, const void *src, size_t count)
 {
-       if (dest < src) {
-               return memcpy(dest, src, count);
-       } else {
-               char *p = dest + count;
-               const char *s = src + count;
-               while (count--)
-                       *--p = *--s;
-       }
-       return dest;
+       unsigned long d0,d1,d2,d3,d4,d5,d6,d7;
+       char *ret;
+
+       __asm__ __volatile__(
+               /* Handle sizes of 32 bytes or more in the loop */
+               "mov %2, %3\n\t"
+               "cmp $0x20, %0\n\t"
+               "jb     1f\n\t"
+
+               /* Decide forward/backward copy mode */
+               "cmp %2, %1\n\t"
+               "jb     2f\n\t"
+
+               /*
+                * The movsq instruction has a high startup latency,
+                * so we handle small sizes with general registers.
+                */
+               "cmp  $680, %0\n\t"
+               "jb 3f\n\t"
+               /*
+                * The movsq instruction is only good for the aligned case.
+                */
+               "cmpb %%dil, %%sil\n\t"
+               "je 4f\n\t"
+               "3:\n\t"
+               "sub $0x20, %0\n\t"
+               /*
+                * We gobble 32 bytes forward in each loop.
+                */
+               "5:\n\t"
+               "sub $0x20, %0\n\t"
+               "movq 0*8(%1), %4\n\t"
+               "movq 1*8(%1), %5\n\t"
+               "movq 2*8(%1), %6\n\t"
+               "movq 3*8(%1), %7\n\t"
+               "leaq 4*8(%1), %1\n\t"
+
+               "movq %4, 0*8(%2)\n\t"
+               "movq %5, 1*8(%2)\n\t"
+               "movq %6, 2*8(%2)\n\t"
+               "movq %7, 3*8(%2)\n\t"
+               "leaq 4*8(%2), %2\n\t"
+               "jae 5b\n\t"
+               "addq $0x20, %0\n\t"
+               "jmp 1f\n\t"
+               /*
+                * Handle data forward by movsq.
+                */
+               ".p2align 4\n\t"
+               "4:\n\t"
+               "movq %0, %8\n\t"
+               "movq -8(%1, %0), %4\n\t"
+               "lea -8(%2, %0), %5\n\t"
+               "shrq $3, %8\n\t"
+               "rep movsq\n\t"
+               "movq %4, (%5)\n\t"
+               "jmp 13f\n\t"
+               /*
+                * Handle data backward by movsq.
+                */
+               ".p2align 4\n\t"
+               "7:\n\t"
+               "movq %0, %8\n\t"
+               "movq (%1), %4\n\t"
+               "movq %2, %5\n\t"
+               "leaq -8(%1, %0), %1\n\t"
+               "leaq -8(%2, %0), %2\n\t"
+               "shrq $3, %8\n\t"
+               "std\n\t"
+               "rep movsq\n\t"
+               "cld\n\t"
+               "movq %4, (%5)\n\t"
+               "jmp 13f\n\t"
+
+               /*
+                * Start to prepare for backward copy.
+                */
+               ".p2align 4\n\t"
+               "2:\n\t"
+               "cmp $680, %0\n\t"
+               "jb 6f \n\t"
+               "cmp %%dil, %%sil\n\t"
+               "je 7b \n\t"
+               "6:\n\t"
+               /*
+                * Calculate copy position to tail.
+                */
+               "addq %0, %1\n\t"
+               "addq %0, %2\n\t"
+               "subq $0x20, %0\n\t"
+               /*
+                * We gobble 32 bytes backward in each loop.
+                */
+               "8:\n\t"
+               "subq $0x20, %0\n\t"
+               "movq -1*8(%1), %4\n\t"
+               "movq -2*8(%1), %5\n\t"
+               "movq -3*8(%1), %6\n\t"
+               "movq -4*8(%1), %7\n\t"
+               "leaq -4*8(%1), %1\n\t"
+
+               "movq %4, -1*8(%2)\n\t"
+               "movq %5, -2*8(%2)\n\t"
+               "movq %6, -3*8(%2)\n\t"
+               "movq %7, -4*8(%2)\n\t"
+               "leaq -4*8(%2), %2\n\t"
+               "jae 8b\n\t"
+               /*
+                * Calculate copy position to head.
+                */
+               "addq $0x20, %0\n\t"
+               "subq %0, %1\n\t"
+               "subq %0, %2\n\t"
+               "1:\n\t"
+               "cmpq $16, %0\n\t"
+               "jb 9f\n\t"
+               /*
+                * Move data from 16 bytes to 31 bytes.
+                */
+               "movq 0*8(%1), %4\n\t"
+               "movq 1*8(%1), %5\n\t"
+               "movq -2*8(%1, %0), %6\n\t"
+               "movq -1*8(%1, %0), %7\n\t"
+               "movq %4, 0*8(%2)\n\t"
+               "movq %5, 1*8(%2)\n\t"
+               "movq %6, -2*8(%2, %0)\n\t"
+               "movq %7, -1*8(%2, %0)\n\t"
+               "jmp 13f\n\t"
+               ".p2align 4\n\t"
+               "9:\n\t"
+               "cmpq $8, %0\n\t"
+               "jb 10f\n\t"
+               /*
+                * Move data from 8 bytes to 15 bytes.
+                */
+               "movq 0*8(%1), %4\n\t"
+               "movq -1*8(%1, %0), %5\n\t"
+               "movq %4, 0*8(%2)\n\t"
+               "movq %5, -1*8(%2, %0)\n\t"
+               "jmp 13f\n\t"
+               "10:\n\t"
+               "cmpq $4, %0\n\t"
+               "jb 11f\n\t"
+               /*
+                * Move data from 4 bytes to 7 bytes.
+                */
+               "movl (%1), %4d\n\t"
+               "movl -4(%1, %0), %5d\n\t"
+               "movl %4d, (%2)\n\t"
+               "movl %5d, -4(%2, %0)\n\t"
+               "jmp 13f\n\t"
+               "11:\n\t"
+               "cmp $2, %0\n\t"
+               "jb 12f\n\t"
+               /*
+                * Move data from 2 bytes to 3 bytes.
+                */
+               "movw (%1), %4w\n\t"
+               "movw -2(%1, %0), %5w\n\t"
+               "movw %4w, (%2)\n\t"
+               "movw %5w, -2(%2, %0)\n\t"
+               "jmp 13f\n\t"
+               "12:\n\t"
+               "cmp $1, %0\n\t"
+               "jb 13f\n\t"
+               /*
+                * Move data for 1 byte.
+                */
+               "movb (%1), %4b\n\t"
+               "movb %4b, (%2)\n\t"
+               "13:\n\t"
+               : "=&d" (d0), "=&S" (d1), "=&D" (d2), "=&a" (ret) ,
+                 "=r"(d3), "=r"(d4), "=r"(d5), "=r"(d6), "=&c" (d7)
+               :"0" (count),
+                "1" (src),
+                "2" (dest)
+               :"memory");
+
+               return ret;
+
 }
 EXPORT_SYMBOL(memmove);
index 4c4508e8a2043015c1cce3eb49a46c0c435e9297..79b0b372d2d033ca35a4bb83295a332c17bbb6c4 100644 (file)
@@ -229,7 +229,16 @@ void vmalloc_sync_all(void)
 
                spin_lock_irqsave(&pgd_lock, flags);
                list_for_each_entry(page, &pgd_list, lru) {
-                       if (!vmalloc_sync_one(page_address(page), address))
+                       spinlock_t *pgt_lock;
+                       pmd_t *ret;
+
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+                       spin_lock(pgt_lock);
+                       ret = vmalloc_sync_one(page_address(page), address);
+                       spin_unlock(pgt_lock);
+
+                       if (!ret)
                                break;
                }
                spin_unlock_irqrestore(&pgd_lock, flags);
@@ -251,6 +260,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
+       WARN_ON_ONCE(in_nmi());
+
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
@@ -326,29 +337,7 @@ out:
 
 void vmalloc_sync_all(void)
 {
-       unsigned long address;
-
-       for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-            address += PGDIR_SIZE) {
-
-               const pgd_t *pgd_ref = pgd_offset_k(address);
-               unsigned long flags;
-               struct page *page;
-
-               if (pgd_none(*pgd_ref))
-                       continue;
-
-               spin_lock_irqsave(&pgd_lock, flags);
-               list_for_each_entry(page, &pgd_list, lru) {
-                       pgd_t *pgd;
-                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                       if (pgd_none(*pgd))
-                               set_pgd(pgd, *pgd_ref);
-                       else
-                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-               }
-               spin_unlock_irqrestore(&pgd_lock, flags);
-       }
+       sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
@@ -369,6 +358,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;
 
+       WARN_ON_ONCE(in_nmi());
+
        /*
         * Copy kernel mappings over when needed. This can also
        * happen within a race in page table update. In the latter
@@ -894,8 +885,14 @@ spurious_fault(unsigned long error_code, unsigned long address)
        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);
 
+       /*
+        * Note: don't use pte_present() here, since it returns true
+        * if the _PAGE_PROTNONE bit is set.  However, this aliases the
+        * _PAGE_GLOBAL bit, which for kernel pages gives false positives
+        * when CONFIG_DEBUG_PAGEALLOC is used.
+        */
        pte = pte_offset_kernel(pmd, address);
-       if (!pte_present(*pte))
+       if (!(pte_flags(*pte) & _PAGE_PRESENT))
                return 0;
 
        ret = spurious_fault_check(error_code, pte);
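
The new comment and the pte_flags() test above hinge on _PAGE_PROTNONE sharing a bit with _PAGE_GLOBAL: a pte_present()-style check can report a global kernel page as present even after DEBUG_PAGEALLOC has cleared _PAGE_PRESENT. A toy demonstration of the aliasing — the flag values are copied as an assumption from pgtable_types.h, so verify them before relying on this:

#include <stdint.h>
#include <stdio.h>

/* Bit values assumed from x86 pgtable_types.h at this point in time. */
#define _PAGE_PRESENT  0x001ULL
#define _PAGE_PROTNONE 0x100ULL		/* same bit as _PAGE_GLOBAL */

/* pte_present()-style test: true for PRESENT *or* PROTNONE. */
static int present_like(uint64_t flags)
{
	return (flags & (_PAGE_PRESENT | _PAGE_PROTNONE)) != 0;
}

int main(void)
{
	/* A global kernel PTE whose PRESENT bit was cleared by
	 * DEBUG_PAGEALLOC: the GLOBAL bit alone trips the loose test. */
	uint64_t flags = _PAGE_PROTNONE;

	printf("pte_present-style check: %d\n", present_like(flags));
	printf("strict _PAGE_PRESENT:    %d\n",
	       !!(flags & _PAGE_PRESENT));
	return 0;
}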
index bca79091b9d6158bcab97bdba2d8b767babeea46..558f2d33207636a54abaf0f024bf0bf942879044 100644 (file)
@@ -67,7 +67,7 @@ static __init void *alloc_low_page(void)
                panic("alloc_low_page: ran out of memory");
 
        adr = __va(pfn * PAGE_SIZE);
-       memset(adr, 0, PAGE_SIZE);
+       clear_page(adr);
        return adr;
 }
 
@@ -558,7 +558,7 @@ char swsusp_pg_dir[PAGE_SIZE]
 
 static inline void save_pg_dir(void)
 {
-       memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+       copy_page(swsusp_pg_dir, swapper_pg_dir);
 }
 #else /* !CONFIG_ACPI_SLEEP */
 static inline void save_pg_dir(void)
index 9a6674689a20f8e491f0a0f845079de9febaac5f..c55f900fbf89253b5a568c50feadd08057c1dde9 100644 (file)
@@ -97,6 +97,43 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
+/*
+ * When memory is added or removed, make sure every process's mm has
+ * suitable PGD entries in its local PGD-level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+       unsigned long address;
+
+       for (address = start; address <= end; address += PGDIR_SIZE) {
+               const pgd_t *pgd_ref = pgd_offset_k(address);
+               unsigned long flags;
+               struct page *page;
+
+               if (pgd_none(*pgd_ref))
+                       continue;
+
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       spinlock_t *pgt_lock;
+
+                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       spin_lock(pgt_lock);
+
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+                       else
+                               BUG_ON(pgd_page_vaddr(*pgd)
+                                      != pgd_page_vaddr(*pgd_ref));
+
+                       spin_unlock(pgt_lock);
+               }
+               spin_unlock_irqrestore(&pgd_lock, flags);
+       }
+}
+
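
    A minimal usage sketch for the new helper (phys_start/phys_end are
    hypothetical; the real callers appear in the hunks below):

        /* Propagate new kernel PGD entries covering [start, end). */
        unsigned long start = (unsigned long)__va(phys_start);
        unsigned long end   = (unsigned long)__va(phys_end);

        sync_global_pgds(start & PGDIR_MASK, end);
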
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -293,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
-       memset(adr, 0, PAGE_SIZE);
+       clear_page(adr);
        *phys  = pfn * PAGE_SIZE;
        return adr;
 }
@@ -534,11 +571,13 @@ kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
 {
-
+       bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
+       unsigned long addr;
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
+       addr = start;
 
        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
@@ -563,7 +602,12 @@ kernel_physical_mapping_init(unsigned long start,
                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
+               pgd_changed = true;
        }
+
+       if (pgd_changed)
+               sync_global_pgds(addr, end);
+
        __flush_tlb_all();
 
        return last_map_addr;
@@ -1003,6 +1047,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                }
 
        }
+       sync_global_pgds((unsigned long)start_page, end);
        return 0;
 }
 
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
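
    A hedged caller sketch for the now sparse-annotated pair (pfn, reg and
    val are illustrative, not from this patch):

        void __iomem *va = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);

        writel(val, va + reg);
        iounmap_atomic(va, KM_USER0);   /* must pass the same km_type */
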
index 970ed579d4e4e86c6265828335b35e35024cfc38..52d54bfc1ebb015df36ff91967ebf421eb8fe4ca 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/numa.h>
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
@@ -54,8 +54,8 @@ static __init int find_northbridge(void)
 static __init void early_get_boot_cpu_id(void)
 {
        /*
-        * need to get boot_cpu_id so can use that to create apicid_to_node
-        * in k8_scan_nodes()
+        * need to get the APIC ID of the BSP so we can use it to
+        * create apicid_to_node in k8_scan_nodes()
         */
 #ifdef CONFIG_X86_MPPARSE
        /*
@@ -212,7 +212,7 @@ int __init k8_scan_nodes(void)
        bits = boot_cpu_data.x86_coreid_bits;
        cores = (1<<bits);
        apicid_base = 0;
-       /* need to get boot_cpu_id early for system with apicid lifting */
+       /* get the APIC ID of the BSP early for systems with apicid lifting */
        early_get_boot_cpu_id();
        if (boot_cpu_physical_apicid > 0) {
                pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
index b3b531a4f8e587e3560b2087e9c483b2cb1b0f93..d87dd6d042d64309fedac698dd1a5b28a8448594 100644 (file)
@@ -631,6 +631,8 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
        if (!pte)
                return false;
 
+       WARN_ON_ONCE(in_nmi());
+
        if (error_code & 2)
                kmemcheck_access(regs, address, KMEMCHECK_WRITE);
        else
index 63c19e27aa6f115badc42f0df6a33ffb1610e6c0..324aa3f072379a46c1f21668680d3b83b9cf10cc 100644 (file)
@@ -9,7 +9,7 @@ static bool opcode_is_prefix(uint8_t b)
                b == 0xf0 || b == 0xf2 || b == 0xf3
                /* Group 2 */
                || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
-               || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e
+               || b == 0x64 || b == 0x65
                /* Group 3 */
                || b == 0x66
                /* Group 4 */
index a7bcc23ef96c989f5fef986cbd7816dfd551d20e..4962f1aeda6f8f76def655121632bc0b68e3e9f2 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/dma.h>
 #include <asm/numa.h>
 #include <asm/acpi.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
index 5c4ee422590e5dc23aec0071e642bf246b627565..8be8c7d7bc89759a55059ea440af5b26e3d9e0c9 100644 (file)
@@ -87,7 +87,19 @@ static inline void pgd_list_del(pgd_t *pgd)
 #define UNSHARED_PTRS_PER_PGD                          \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
-static void pgd_ctor(pgd_t *pgd)
+
+static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+       BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+       virt_to_page(pgd)->index = (pgoff_t)mm;
+}
+
+struct mm_struct *pgd_page_get_mm(struct page *page)
+{
+       return (struct mm_struct *)page->index;
+}
+
+static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 {
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
@@ -98,15 +110,13 @@ static void pgd_ctor(pgd_t *pgd)
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
-               paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
-                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                                        KERNEL_PGD_BOUNDARY,
-                                        KERNEL_PGD_PTRS);
        }
 
        /* list required to sync kernel mapping updates */
-       if (!SHARED_KERNEL_PMD)
+       if (!SHARED_KERNEL_PMD) {
+               pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
+       }
 }
 
 static void pgd_dtor(pgd_t *pgd)
@@ -272,7 +282,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         */
        spin_lock_irqsave(&pgd_lock, flags);
 
-       pgd_ctor(pgd);
+       pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);
 
        spin_unlock_irqrestore(&pgd_lock, flags);
index f9897f7a9ef1e25cfa81e9190a3449bec9e35f70..9c0d0d399c307678a09668155d9067139ab30416 100644 (file)
@@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
                return -1;
        }
 
-       for_each_node_mask(i, nodes_parsed)
-               e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
-                                               nodes[i].end >> PAGE_SHIFT);
+       for (i = 0; i < num_node_memblks; i++)
+               e820_register_active_regions(memblk_nodeid[i],
+                               node_memblk_range[i].start >> PAGE_SHIFT,
+                               node_memblk_range[i].end >> PAGE_SHIFT);
+
        /* for out of order entries in SRAT */
        sort_node_map();
        if (!nodes_cover_memory(nodes)) {
index c03f14ab666742d6960ff3339ebcfe28a003308b..49358481c733235918cde7c576a3332fba50c364 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -52,6 +53,8 @@ union smp_flush_state {
    want false sharing in the per cpu data segment. */
 static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
 
+static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
+
 /*
  * We cannot call mmdrop() because we are in interrupt context,
  * instead update mm->cpu_vm_mask.
@@ -173,7 +176,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
        union smp_flush_state *f;
 
        /* Caller has disabled preemption */
-       sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+       sender = this_cpu_read(tlb_vector_offset);
        f = &flush_state[sender];
 
        /*
@@ -218,6 +221,47 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
        flush_tlb_others_ipi(cpumask, mm, va);
 }
 
+static void __cpuinit calculate_tlb_offset(void)
+{
+       int cpu, node, nr_node_vecs;
+       /*
+        * We change tlb_vector_offset for each CPU at runtime, but this
+        * cannot cause inconsistency, as the write is atomic on x86. We
+        * might see more lock contention for a short while, but once every
+        * CPU's tlb_vector_offset has been updated, everything goes back
+        * to normal.
+        *
+        * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we
+        * might waste some vectors.
+        */
+       if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
+               nr_node_vecs = 1;
+       else
+               nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
+
+       for_each_online_node(node) {
+               int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
+                       nr_node_vecs;
+               int cpu_offset = 0;
+               for_each_cpu(cpu, cpumask_of_node(node)) {
+                       per_cpu(tlb_vector_offset, cpu) = node_offset +
+                               cpu_offset;
+                       cpu_offset++;
+                       cpu_offset = cpu_offset % nr_node_vecs;
+               }
+       }
+}
+
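
    Worked example, assuming NUM_INVALIDATE_TLB_VECTORS is 8 (its x86 value
    at this point) and two online nodes: nr_node_vecs = 8 / 2 = 4, so the
    CPUs of node 0 rotate through vector offsets 0..3 and those of node 1
    through 4..7, and flush IPIs from different nodes no longer contend on
    the same flush_state entry.
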
+static int tlb_cpuhp_notify(struct notifier_block *n,
+               unsigned long action, void *hcpu)
+{
+       switch (action & 0xf) {
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               calculate_tlb_offset();
+       }
+       return NOTIFY_OK;
+}
+
 static int __cpuinit init_smp_flush(void)
 {
        int i;
@@ -225,6 +269,8 @@ static int __cpuinit init_smp_flush(void)
        for (i = 0; i < ARRAY_SIZE(flush_state); i++)
                raw_spin_lock_init(&flush_state[i].tlbstate_lock);
 
+       calculate_tlb_offset();
+       hotcpu_notifier(tlb_cpuhp_notify, 0);
        return 0;
 }
 core_initcall(init_smp_flush);
index 3855096c59b81910fe827c76151e84bbac836770..2d49d4e19a3619c0be2c7d17a892b8aea582048f 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 #include <asm/stacktrace.h>
+#include <linux/compat.h>
 
 static void backtrace_warning_symbol(void *data, char *msg,
                                     unsigned long symbol)
@@ -48,14 +49,12 @@ static struct stacktrace_ops backtrace_ops = {
        .walk_stack     = print_context_stack,
 };
 
-struct frame_head {
-       struct frame_head *bp;
-       unsigned long ret;
-} __attribute__((packed));
-
-static struct frame_head *dump_user_backtrace(struct frame_head *head)
+#ifdef CONFIG_COMPAT
+static struct stack_frame_ia32 *
+dump_user_backtrace_32(struct stack_frame_ia32 *head)
 {
-       struct frame_head bufhead[2];
+       struct stack_frame_ia32 bufhead[2];
+       struct stack_frame_ia32 *fp;
 
        /* Also check accessibility of one struct stack_frame_ia32 beyond */
        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
@@ -63,20 +62,66 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
                return NULL;
 
-       oprofile_add_trace(bufhead[0].ret);
+       fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+
+       oprofile_add_trace(bufhead[0].return_address);
+
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (head >= fp)
+               return NULL;
+
+       return fp;
+}
+
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+       struct stack_frame_ia32 *head;
+
+       /* User process is 32-bit */
+       if (!current || !test_thread_flag(TIF_IA32))
+               return 0;
+
+       head = (struct stack_frame_ia32 *) regs->bp;
+       while (depth-- && head)
+               head = dump_user_backtrace_32(head);
+
+       return 1;
+}
+
+#else
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+       return 0;
+}
+#endif /* CONFIG_COMPAT */
+
+static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+{
+       struct stack_frame bufhead[2];
+
+       /* Also check accessibility of one struct stack_frame beyond */
+       if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+               return NULL;
+       if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+               return NULL;
+
+       oprofile_add_trace(bufhead[0].return_address);
 
        /* frame pointers should strictly progress back up the stack
         * (towards higher addresses) */
-       if (head >= bufhead[0].bp)
+       if (head >= bufhead[0].next_frame)
                return NULL;
 
-       return bufhead[0].bp;
+       return bufhead[0].next_frame;
 }
 
 void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-       struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+       struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
        if (!user_mode_vm(regs)) {
                unsigned long stack = kernel_stack_pointer(regs);
@@ -86,6 +131,9 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
                return;
        }
 
+       if (x86_backtrace_32(regs, depth))
+               return;
+
        while (depth-- && head)
                head = dump_user_backtrace(head);
 }
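
    For reference, the frame layouts being walked look like this
    (paraphrased from asm/stacktrace.h of this era; the field names match
    the diff above):

        struct stack_frame {                    /* native frames */
                struct stack_frame *next_frame;
                unsigned long return_address;
        };

        struct stack_frame_ia32 {               /* compat 32-bit frames */
                u32 next_frame;                 /* widened via compat_ptr() */
                u32 return_address;
        };
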
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..bd1489c3ce09b7416c400701f7fb407eae673615 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -664,7 +671,10 @@ static int __init ppro_init(char **cpu_type)
        case 14:
                *cpu_type = "i386/core";
                break;
-       case 15: case 23:
+       case 0x0f:
+       case 0x16:
+       case 0x17:
+       case 0x1d:
                *cpu_type = "i386/core_2";
                break;
        case 0x1a:
@@ -685,9 +695,6 @@ static int __init ppro_init(char **cpu_type)
        return 1;
 }
 
-/* in order to get sysfs right */
-static int using_nmi;
-
 int __init op_nmi_init(struct oprofile_operations *ops)
 {
        __u8 vendor = boot_cpu_data.x86_vendor;
@@ -774,14 +781,15 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
-       using_nmi = 1;
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
 }
 
 void op_nmi_exit(void)
 {
-       if (using_nmi)
-               exit_sysfs();
+       exit_sysfs();
 }
index b34815408f582bd002854d83c8e5736b6de37b24..13700ec8e2e43587010594fe4e361ac88d29cb85 100644 (file)
@@ -304,7 +304,7 @@ static struct pci_raw_ops pci_olpc_conf = {
 
 int __init pci_olpc_init(void)
 {
-       printk(KERN_INFO "PCI: Using configuration type OLPC\n");
+       printk(KERN_INFO "PCI: Using configuration type OLPC XO-1\n");
        raw_pci_ops = &pci_olpc_conf;
        is_lx = is_geode_lx();
        return 0;
index 42086ac406af21da6d281687625684c0dddbe11c..b2363fcbcd0f71e55d8aa88433cffdd93314d24a 100644 (file)
@@ -1969,7 +1969,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pmd_init,
-       .alloc_pmd_clone = paravirt_nop,
        .release_pmd = xen_release_pmd_init,
 
 #ifdef CONFIG_X86_64
index 1a5353a753fcd10e1330c03d1af43a6fe9c5a5b7..b2bb5aa3b0540e42847a7664aa529cf5d2c0fb83 100644 (file)
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void)
 __init void xen_hvm_init_time_ops(void)
 {
        /* vector callback is needed otherwise we cannot receive interrupts
-        * on cpu > 0 */
-       if (!xen_have_vector_callback && num_present_cpus() > 1)
+        * on cpu > 0 and at this point we don't know how many cpus are
+        * available */
+       if (!xen_have_vector_callback)
                return;
        if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
                printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
index a6809645d212d9cf970b85473e3d9c05bc4ba652..2fef1ef931a06756d364707c28d97e6c440aa484 100644 (file)
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
        /* Currently we do not support hierarchy deeper than two levels (0,1) */
        if (parent != cgroup->top_cgroup)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-EPERM);
 
        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
index ee1a1e7e63ccfc3e735566e3725febee1890bb2e..32a1c123dfb36a5fff24857221a9a4d1b79131bf 100644 (file)
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = (bio->bi_rw & REQ_SYNC);
-       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+       const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
        if ((bio->bi_rw & REQ_HARDBARRIER) &&
index c65d7593f7f1deba5511347ceeb6dd5c881e5b3c..ade0a08c9099afdc487916924c0fe6718185536d 100644 (file)
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                return PTR_ERR(bio);
 
        if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << REQ_WRITE);
+               bio->bi_rw |= REQ_WRITE;
 
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
index 3b0cd4249671d9826b42ea10473a11486c403368..eafc94f68d79f2ceb1c0a89fce8e449c3a5ff3f6 100644 (file)
@@ -361,6 +361,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
+       /*
+        * Don't merge file system requests and discard requests
+        */
+       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+               return 0;
+
+       /*
+        * Don't merge discard requests and secure discard requests
+        */
+       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+               return 0;
+
        /*
         * not contiguous
         */
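
    Concretely, what the two new checks above prevent: a REQ_DISCARD
    request that ends where a plain write begins is contiguous on disk,
    but a merged request could not carry both semantics; the same holds
    for mixing discard with secure-discard (REQ_SECURE) requests.
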
index 001ab18078f5ba1b8c6f34e021cc02ef401d98b6..0749b89c68852fc824e4afa862d871814c56ddd8 100644 (file)
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
+               kobject_put(&dev->kobj);
                return ret;
        }
 
index 6e7dc87141e48230d0eb82c2bdcf770b0ac581a9..d6b911ac002cd9ef14c76535a6b3b6814243079b 100644 (file)
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+       int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
-       return cpumask_first(mask);
+       group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       return cpumask_first(topology_thread_cpumask(cpu));
+       group = cpumask_first(topology_thread_cpumask(cpu));
 #else
        return cpu;
 #endif
+       if (likely(group < NR_CPUS))
+               return group;
+       return cpu;
 }
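
    The fallback matters because cpumask_first() returns nr_cpu_ids when
    the mask is empty, which can happen transiently while a CPU's topology
    masks are being set up or torn down; the old code handed that
    out-of-range value to callers, whereas the fixed version falls back to
    the CPU itself.
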
 
 /*
index 82d58829ba591eeef13a3ed4eca96b8a019c4d85..0c00870553a3fca2c58c325b780b92689fc677a6 100644 (file)
@@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
        /*
         * fill in all the output members
         */
-       hdr->device_status = status_byte(rq->errors);
+       hdr->device_status = rq->errors & 0xff;
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
index eb4086f7dfef9eb7efc6d202fb7a979919a551b8..9eba291eb6fd23854aee14d8a35b5b1108fc0629 100644 (file)
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        struct cfq_group *orig_cfqg;
+       /* Number of sectors dispatched from queue in single dispatch round */
+       unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
        struct hlist_node cfqd_node;
        atomic_t ref;
 #endif
+       /* number of requests that are on the dispatch list or inside driver */
+       int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
                        &cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+       /*
+        * If we are not idling on queues and the drive supports NCQ, requests
+        * execute in parallel and measuring time per queue is not meaningful
+        * in most cases unless we drive shallow queue depths, which itself
+        * becomes a performance bottleneck. In such cases, switch to providing
+        * fairness in terms of number of IOs instead.
+        */
+       if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+               return true;
+       else
+               return false;
+}
+
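
    For example (numbers illustrative): with slice_idle set to 0 on an NCQ
    SSD, a queue that dispatches 100 requests during its turn is charged
    charge = cfqq->slice_dispatch = 100 in cfq_group_served() below, so
    group vdisktime advances in proportion to IOs issued rather than the
    few jiffies that elapsed.
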
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
        if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                        slice_used = cfqq->allocated_slice;
        }
 
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
        return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl, charge_sl;
+       unsigned int used_sl, charge;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
 
        BUG_ON(nr_sync < 0);
-       used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+       used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-       if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-               charge_sl = cfqq->allocated_slice;
+       if (iops_mode(cfqd))
+               charge = cfqq->slice_dispatch;
+       else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+               charge = cfqq->allocated_slice;
 
        /* Can't update vdisktime while group is on service tree */
        cfq_rb_erase(&cfqg->rb_node, st);
-       cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+       cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+                       iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
         */
        atomic_set(&cfqg->ref, 1);
 
-       /* Add group onto cgroup list */
-       sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-       cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+       /*
+        * Add group onto cgroup list. It might happen that bdi->dev is
+        * not initialized yet; in that case initialize this new group
+        * without major and minor info, and that info will be filled in
+        * once a new thread comes in for IO. See the code above.
+        */
+       if (bdi->dev) {
+               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                        MKDEV(major, minor));
+       } else
+               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+                                       0);
+
        cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
        /* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
+               cfqq->nr_sectors = 0;
 
                cfq_clear_cfqq_wait_request(cfqq);
                cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        BUG_ON(!service_tree);
        BUG_ON(!service_tree->count);
 
+       if (!cfqd->cfq_slice_idle)
+               return false;
+
        /* We never do for idle class queues. */
        if (prio == IDLE_WORKLOAD)
                return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
-       unsigned long sl;
+       unsigned long sl, group_idle = 0;
 
        /*
         * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        /*
         * idle is disabled, either manually or by past process history
         */
-       if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-               return;
+       if (!cfq_should_idle(cfqd, cfqq)) {
+               /* no queue idling. Check for group idling */
+               if (cfqd->cfq_group_idle)
+                       group_idle = cfqd->cfq_group_idle;
+               else
+                       return;
+       }
 
        /*
         * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                return;
        }
 
+       /* There are other queues in the group, don't do group idle */
+       if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
-       sl = cfqd->cfq_slice_idle;
+       if (group_idle)
+               sl = cfqd->cfq_group_idle;
+       else
+               sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+                       group_idle ? 1 : 0);
 }
 
 /*
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
        cfq_remove_request(rq);
        cfqq->dispatched++;
+       (RQ_CFQG(rq))->dispatched++;
        elv_dispatch_sort(q, rq);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+       cfqq->nr_sectors += blk_rq_sectors(rq);
        cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                        rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                        cfqq = NULL;
                        goto keep_queue;
                } else
-                       goto expire;
+                       goto check_group_idle;
        }
 
        /*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
-       if (timer_pending(&cfqd->idle_slice_timer) ||
-           (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+       if (timer_pending(&cfqd->idle_slice_timer)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       /*
+        * If group idle is enabled and there are requests dispatched from
+        * this group, wait for requests to complete.
+        */
+check_group_idle:
+       if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+           && cfqq->cfqg->dispatched) {
                cfqq = NULL;
                goto keep_queue;
        }
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
+       (RQ_CFQG(rq))->dispatched--;
        cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
                        rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                 * the queue.
                 */
                if (cfq_should_wait_busy(cfqd, cfqq)) {
-                       cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                       unsigned long extend_sl = cfqd->cfq_slice_idle;
+                       if (!cfqd->cfq_slice_idle)
+                               extend_sl = cfqd->cfq_group_idle;
+                       cfqq->slice_end = jiffies + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_group_idle = cfq_group_idle;
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(group_idle),
        CFQ_ATTR(low_latency),
        CFQ_ATTR(group_isolation),
        __ATTR_NULL
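
    Once registered, the new knob appears next to slice_idle under the
    elevator's sysfs directory (e.g. /sys/block/<dev>/queue/iosched/group_idle;
    path illustrative), and the trailing 1 in its SHOW/STORE macros selects
    the same jiffies-to-milliseconds conversion slice_idle uses.
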
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (!cfq_group_idle)
+               cfq_group_idle = 1;
+#else
+       cfq_group_idle = 0;
+#endif
        if (cfq_slab_setup())
                return -ENOMEM;
 
index ec585c9554d33c04b973537f9ac3216e332afa50..4e11559aa2b02d78c4d90c104e81e7a2e84aa05d 100644 (file)
@@ -938,6 +938,7 @@ int elv_register_queue(struct request_queue *q)
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
+               e->registered = 1;
        }
        return error;
 }
@@ -947,6 +948,7 @@ static void __elv_unregister_queue(struct elevator_queue *e)
 {
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
+       e->registered = 0;
 }
 
 void elv_unregister_queue(struct request_queue *q)
@@ -1009,18 +1011,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
        struct elevator_queue *old_elevator, *e;
        void *data;
+       int err;
 
        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
-               return 0;
+               return -ENOMEM;
 
        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
-               return 0;
+               return -ENOMEM;
        }
 
        /*
@@ -1041,10 +1044,13 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        spin_unlock_irq(q->queue_lock);
 
-       __elv_unregister_queue(old_elevator);
+       if (old_elevator->registered) {
+               __elv_unregister_queue(old_elevator);
 
-       if (elv_register_queue(q))
-               goto fail_register;
+               err = elv_register_queue(q);
+               if (err)
+                       goto fail_register;
+       }
 
        /*
         * finally exit old elevator and turn off BYPASS.
@@ -1056,7 +1062,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-       return 1;
+       return 0;
 
 fail_register:
        /*
@@ -1071,17 +1077,19 @@ fail_register:
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);
 
-       return 0;
+       return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-                         size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;
 
        if (!q->elevator)
-               return count;
+               return -ENXIO;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1100,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
-               return count;
+               return 0;
        }
 
-       if (!elevator_switch(q, e))
-               printk(KERN_ERR "elevator: switch to %s failed\n",
-                                                       elevator_name);
-       return count;
+       return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
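
    A sketch of the intended in-kernel use of the new export (the queue
    variable and scheduler name are illustrative):

        int err = elevator_change(q, "noop");

        if (err)
                printk(KERN_WARNING "elevator: could not switch to noop: %d\n",
                       err);
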
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+                         size_t count)
+{
+       int ret;
+
+       if (!q->elevator)
+               return count;
+
+       ret = elevator_change(q, name);
+       if (!ret)
+               return count;
+
+       printk(KERN_ERR "elevator: switch to %s failed\n", name);
+       return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
index ae473445ad6daf11254ee269c3194c9ed03e0cb1..a2aea53a75ed9fdf99058f9676429c84104c2f27 100644 (file)
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI)             += spi/
 obj-y                          += net/
 obj-$(CONFIG_ATM)              += atm/
 obj-$(CONFIG_FUSION)           += message/
-obj-$(CONFIG_FIREWIRE)         += firewire/
+obj-y                          += firewire/
 obj-y                          += ieee1394/
 obj-$(CONFIG_UIO)              += uio/
 obj-y                          += cdrom/
index b811f2173f6f167c84700040fe2720bafb43156f..88681aca88c581891399e18b056d9a6df94a9e01 100644 (file)
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
 
          Be aware that using this interface can confuse your Embedded
          Controller in a way that a normal reboot is not enough. You then
-         have to power of your system, and remove the laptop battery for
+         have to power off your system, and remove the laptop battery for
          some seconds.
          An Embedded Controller typically is available on laptops and reads
          sensor values like battery state and temperature.
index b76848c80be34729c56bd20d04f75f9f534cbb36..6afceb3d4034edef3a868722e41af82873057e74 100644 (file)
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
+#include <asm/mwait.h>
 
 #define ACPI_PROCESSOR_AGGREGATOR_CLASS        "acpi_pad"
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
 
-#define MWAIT_SUBSTATE_MASK    (0xf)
-#define MWAIT_CSTATE_MASK      (0xf)
-#define MWAIT_SUBSTATE_SIZE    (4)
-#define CPUID_MWAIT_LEAF (5)
-#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
-#define CPUID5_ECX_INTERRUPT_BREAK     (0x2)
 static unsigned long power_saving_mwait_eax;
 
 static unsigned char tsc_detected_unstable;
@@ -382,31 +377,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
        device_remove_file(&device->dev, &dev_attr_rrtime);
 }
 
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query the firmware for how many CPUs should be idle.
+ * Returns -1 on failure.
+ */
+static int acpi_pad_pur(acpi_handle handle)
 {
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
-       int rev, num, ret = -EINVAL;
+       int num = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
-               return -EINVAL;
+               return num;
 
        if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
+               return num;
 
        package = buffer.pointer;
-       if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
-               goto out;
-       rev = package->package.elements[0].integer.value;
-       num = package->package.elements[1].integer.value;
-       if (rev != 1 || num < 0)
-               goto out;
-       *num_cpus = num;
-       ret = 0;
-out:
+
+       if (package->type == ACPI_TYPE_PACKAGE &&
+               package->package.count == 2 &&
+               package->package.elements[0].integer.value == 1) /* rev 1 */
+               num = package->package.elements[1].integer.value;
+
        kfree(buffer.pointer);
-       return ret;
+       return num;
 }
 
 /* Notify firmware how many CPUs are idle */
@@ -433,7 +429,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        uint32_t idle_cpus;
 
        mutex_lock(&isolated_cpus_lock);
-       if (acpi_pad_pur(handle, &num_cpus)) {
+       num_cpus = acpi_pad_pur(handle);
+       if (num_cpus < 0) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
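
    For reference, _PUR is expected to return a two-element package
    {revision, number of idle CPUs}; e.g. {1, 2} asks the OS to keep two
    logical CPUs idle. With the reworked helper, any malformed package now
    simply yields -1 instead of a distinct errno.
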
index df85b53a674fc33105fa1434b3800db7ab26a423..7dad9160f20998112cc302d0a239c9fa27fcd429 100644 (file)
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
        ACPI_BITMASK_POWER_BUTTON_STATUS   | \
        ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
        ACPI_BITMASK_RT_CLOCK_STATUS       | \
+       ACPI_BITMASK_PCIEXP_WAKE_DISABLE   | \
        ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE               0x0001
index 74c24d517f81768a6a00418db71c9f93be335b2e..4093522eed45692012b5da617c57187ef20c4458 100644 (file)
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in  conjuction with
+ *              fatal system error. Used in  conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
index 22cfcfbd9fff77cd4cd5bb77be80c244461b2df5..491191e6cf692bffe1811a22674a0eb0720c3771 100644 (file)
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
 
                        /*
                         * 16-, 32-, and 64-bit cases must use the move macros that perform
-                        * endian conversion and/or accomodate hardware that cannot perform
+                        * endian conversion and/or accommodate hardware that cannot perform
                         * misaligned memory transfers
                         */
                case ACPI_RSC_MOVE16:
index 907e350f1c7df58370cb0e68a399b91d903ec466..fca34ccfd294a782e3f05312d99d5b5fdef5bfb4 100644 (file)
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
        depends on ACPI_APEI
        help
          ERST is a way provided by APEI to save and retrieve hardware
-         error infomation to and from a persistent store. Enable this
+         error information to and from a persistent store. Enable this
          if you want to debug and test the ERST kernel support
          and firmware implementation.
index 73fd0c7487c1ae0b1311f4d7e1c048ddc809153a..4a904a4bf05f83a1b952ec2365811f8c5db931e1 100644 (file)
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
 int apei_resources_request(struct apei_resources *resources,
                           const char *desc)
 {
-       struct apei_res *res, *res_bak;
+       struct apei_res *res, *res_bak = NULL;
        struct resource *r;
+       int rc;
 
-       apei_resources_sub(resources, &apei_resources_all);
+       rc = apei_resources_sub(resources, &apei_resources_all);
+       if (rc)
+               return rc;
 
+       rc = -EINVAL;
        list_for_each_entry(res, &resources->iomem, list) {
                r = request_mem_region(res->start, res->end - res->start,
                                       desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
                }
        }
 
-       apei_resources_merge(&apei_resources_all, resources);
+       rc = apei_resources_merge(&apei_resources_all, resources);
+       if (rc) {
+               pr_err(APEI_PFX "Fail to merge resources!\n");
+               goto err_unmap_ioport;
+       }
 
        return 0;
 err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
                        break;
                release_mem_region(res->start, res->end - res->start);
        }
-       return -EINVAL;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
 
 void apei_resources_release(struct apei_resources *resources)
 {
+       int rc;
        struct apei_res *res;
 
        list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
        list_for_each_entry(res, &resources->ioport, list)
                release_region(res->start, res->end - res->start);
 
-       apei_resources_sub(&apei_resources_all, resources);
+       rc = apei_resources_sub(&apei_resources_all, resources);
+       if (rc)
+               pr_err(APEI_PFX "Fail to sub resources!\n");
 }
 EXPORT_SYMBOL_GPL(apei_resources_release);
 
index 465c885938ee89a45db10f1f39d1ceb8cba41cf9..cf29df69380b8dd32585a074c53efc7af7a65471 100644 (file)
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
 
 static int einj_check_table(struct acpi_table_einj *einj_tab)
 {
-       if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+       if ((einj_tab->header_length !=
+            (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+           && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
                return -EINVAL;
        if (einj_tab->header.length < sizeof(struct acpi_table_einj))
                return -EINVAL;
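
    The relaxed check accepts both firmware interpretations of
    header_length: the size of the whole struct acpi_table_einj, or that
    size minus the 36-byte common ACPI table header
    (sizeof(einj_tab->header)).
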
index 5281ddda2777c99c40f0830c1d4e5022cc5b37aa..da1228a9a544f026bab6c549e65892d439367266 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table debug support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provide the
  * debugging/testing support for ERST kernel support and firmware
  * implementation.
  *
@@ -111,11 +111,13 @@ retry:
                goto out;
        }
        if (len > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(len, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(len, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = len;
                goto retry;
        }
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
        if (mutex_lock_interruptible(&erst_dbg_mutex))
                return -EINTR;
        if (usize > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(usize, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = usize;
        }
        rc = copy_from_user(erst_dbg_buf, ubuf, usize);
index 18645f4e83cdd2f22d526276b320dac631be8940..1211c03149e8c7c258fee89dd1109e1e6b901c26 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
  *
  * For more information about ERST, please refer to ACPI Specification
  * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
 {
        int rc;
        u64 offset;
+       void *src, *dst;
+
+       /* ioremap does not work in interrupt context */
+       if (in_interrupt()) {
+               pr_warning(ERST_PFX
+                          "MOVE_DATA can not be used in interrupt context");
+               return -EBUSY;
+       }
 
        rc = __apei_exec_read_register(entry, &offset);
        if (rc)
                return rc;
-       memmove((void *)ctx->dst_base + offset,
-               (void *)ctx->src_base + offset,
-               ctx->var2);
+
+       src = ioremap(ctx->src_base + offset, ctx->var2);
+       if (!src)
+               return -ENOMEM;
+       dst = ioremap(ctx->dst_base + offset, ctx->var2);
+       if (!dst) {
+               iounmap(src);
+               return -ENOMEM;
+       }
+
+       memmove(dst, src, ctx->var2);
+
+       iounmap(src);
+       iounmap(dst);
 
        return 0;
 }
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
 
 static int erst_check_table(struct acpi_table_erst *erst_tab)
 {
-       if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+       if ((erst_tab->header_length !=
+            (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+           && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
                return -EINVAL;
        if (erst_tab->header.length < sizeof(struct acpi_table_erst))
                return -EINVAL;
index 385a6059714a72dd32256685a2606586b92d771e..0d505e59214df73dfb40b67b7d5fc45a2d48e127 100644 (file)
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
        struct ghes *ghes = NULL;
        int rc = -EINVAL;
 
-       generic = ghes_dev->dev.platform_data;
+       generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
        if (!generic->enabled)
                return -ENODEV;
 
index 343168d1826626c202171c3a53c5b5e4c2090a65..1a3508a7fe03f157c2e0144727bbeb5c244d2a46 100644 (file)
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
 
 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
-       struct acpi_hest_generic *generic;
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
        int rc;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
-       generic = (struct acpi_hest_generic *)hest_hdr;
-       if (!generic->enabled)
+
+       if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
-       ghes_dev->dev.platform_data = generic;
+
+       rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+       if (rc)
+               goto err;
+
        rc = platform_device_add(ghes_dev);
        if (rc)
                goto err;
index 8f8bd736d4ff11919656e79d2ef9442b037fafff..542e5390389120de7ecd7da4cb7bd059baa18b01 100644 (file)
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
        list_add_tail_rcu(&map->list, &acpi_iomaps);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
 
-       return vaddr + (paddr - pg_off);
+       return map->vaddr + (paddr - map->paddr);
 err_unmap:
        iounmap(vaddr);
        return NULL;
index dc58402b0a177a4e03e8dc54e7a094804edc1d36..98417201e9ce3881257354e7c2a360ee019a4e39 100644 (file)
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_CYCLE_COUNT,
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
-       POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_POWER_NOW,
        POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
        POWER_SUPPLY_PROP_ENERGY_FULL,
index 2bb28b9d91c4c2643106ed0d63069ae85799d136..af308d03f49235a6da0b30791e6857c824e8e32f 100644 (file)
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
 {
        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
        acpi_osi_setup("!Windows 2006");
+       acpi_osi_setup("!Windows 2006 SP1");
+       acpi_osi_setup("!Windows 2006 SP2");
        return 0;
 }
 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -202,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                },
        },
        {
+       /*
+        * The DSDT of the MSI GX723 contains an NVIF method that must be
+        * called by the Nvidia driver (e.g. nouveau) when the user presses
+        * the brightness hotkey. Currently nouveau does not do that, which
+        * causes an infinite while loop in the DSDT whenever the hotkey is
+        * pressed. Add the MSI GX723's DMI information to this table to
+        * work around the issue.
+        * Will remove MSI GX723 from the table once nouveau grows support.
+        */
+       .callback = dmi_disable_osi_vista,
+       .ident = "MSI GX723",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+               },
+       },
+       {
        .callback = dmi_disable_osi_vista,
        .ident = "Sony VGN-NS10J_S",
        .matches = {
@@ -226,6 +245,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                },
        },
        {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba Satellite L355",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+               },
+       },
+       {
        .callback = dmi_disable_osi_win7,
        .ident = "ASUS K50IJ",
        .matches = {
@@ -233,6 +260,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
                },
        },
+       {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba P305D",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 5c221ab535d5b0a459516a16034f80a94a7d1bb4..310e3b9749cbbacdabb3c03d288a6b42874aa41b 100644 (file)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
 static int set_power_nocheck(const struct dmi_system_id *id)
 {
        printk(KERN_NOTICE PREFIX "%s detected - "
-               "disable power check in power transistion\n", id->ident);
+               "disable power check in power transition\n", id->ident);
        acpi_power_nocheck = 1;
        return 0;
 }
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
        /*
-        * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+        * Invoke the DSDT corruption work-around on all Toshiba Satellite models.
         * https://bugzilla.kernel.org/show_bug.cgi?id=14679
         */
        {
         .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite A505",
+        .ident = "TOSHIBA Satellite",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
-               },
-       },
-       {
-        .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite L505D",
-        .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
                },
        },
        {}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
 
        /*
         * If the laptop falls into the DMI check table, the power state check
-        * will be disabled in the course of device power transistion.
+        * will be disabled in the course of device power transition.
         */
        dmi_check_system(power_nocheck_dmi_table);
 
index 8a3b840c0bb268d0580cd5550c41986bf3c09662..d94d2953c9740f34675eeb576e00e74ee621bfd2 100644 (file)
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
 
        acpi_bus_unregister_driver(&acpi_fan_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
        remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
 
        return;
 }
index e9699aaed1092874b0f275d2ca86a8142850db1c..bec561c14bebee3a77817bdce8fa09b939f85ac0 100644 (file)
@@ -28,12 +28,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
 }
 
 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
-       {
-       set_no_mwait, "IFL91 board", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-       DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
-       DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
@@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void)
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
+       acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
 }
index 15602189238942cc9db03b9d9f25c18823128ea0..347eb21b235302d44d0c5e2d768ce3a8870142f2 100644 (file)
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
                printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                        acpi_idle_driver.name);
        } else {
-               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
                        cpuidle_get_driver()->name);
        }
 
index ba1bd263d903094692c3683c9557e8b919d1d985..3a73a93596e88a29c1e66fabec91dd2d0db96f6c 100644 (file)
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
        if (!try_module_get(calling_module))
                return -EINVAL;
 
-       /* is_done is set to negative if an error occured,
-        * and to postitive if _no_ error occured, but SMM
+       /* is_done is set to negative if an error occurred,
+        * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
index cf82989ae7568c3c54ded69d16829a2851119d80..4754ff6e70e6daa25b13df6f115745eb7bfb7e55 100644 (file)
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+       acpi_nvs_nosave();
+       return 0;
+}
+
 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        {
        .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VGN-SR11M",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Everex StepNote Series",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
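For reference, the DMI-quirk pattern these ACPI hunks keep extending is a
callback plus a zero-terminated match table handed to dmi_check_system();
a minimal sketch, with the callback, table, vendor and product strings all
invented for illustration:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/dmi.h>

	static int __init example_quirk(const struct dmi_system_id *d)
	{
		printk(KERN_NOTICE "%s detected - applying quirk\n", d->ident);
		/* flip whatever global flag the quirk controls here */
		return 0;
	}

	static struct dmi_system_id example_dmi_table[] __initdata = {
		{
		.callback = example_quirk,
		.ident = "Example Vendor Model X",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
			},
		},
		{}	/* the table must stay zero-terminated */
	};

	/* called once during init: dmi_check_system(example_dmi_table); */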
index 68e2e4582fa2f18968058c8125704a23f151cb62..f8588f81048ac989d6af1d727693a234f54bc27b 100644 (file)
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
        ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
 };
 
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
-                 &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level,
-                 &acpi_dbg_level, 0644);
+static struct kernel_param_ops param_ops_debug_layer = {
+       .set = param_set_uint,
+       .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+       .set = param_set_uint,
+       .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
 static char trace_method_name[6];
 module_param_string(trace_method_name, trace_method_name, 6, 0644);
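The hunk above is the standard migration from module_param_call() to the
kernel_param_ops API introduced in 2.6.36; a minimal sketch of the same
idiom for a hypothetical unsigned int parameter (example_value, example_get
and param_ops_example are illustrative names, not from this patch):

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>

	static unsigned int example_value;

	static int example_get(char *buffer, const struct kernel_param *kp)
	{
		/* custom formatting on read; writes use the stock uint setter */
		return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
	}

	static struct kernel_param_ops param_ops_example = {
		.set = param_set_uint,
		.get = example_get,
	};

	module_param_cb(example_value, &param_ops_example, &example_value, 0644);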
index c5fef01b3c9591701fb03ae86290a6b5b643c32a..b836761265988590cea46dbcffc39d30ea3c5578 100644 (file)
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
                if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
-                       printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
-                                       "control misses _BQC function\n");
+                       printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+                               "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
                return AE_CTRL_TERMINATE;
        }
index 013727b20417226249ca942d28708164a497c2dc..99d0e5a511482215a7b3c59ffaed9f0827cfa2b1 100644 (file)
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static struct scsi_host_template ahci_sht = {
+       AHCI_SHT("ahci"),
+};
+
 static struct ata_port_operations ahci_vt8251_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 474427b6f99f47a8dd997042cc9f2d271a743228..e5fdeebf9ef0610d6f43999baefc4fd06fcd354b 100644 (file)
@@ -298,7 +298,17 @@ struct ahci_host_priv {
 
 extern int ahci_ignore_sss;
 
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name)                                             \
+       ATA_NCQ_SHT(drv_name),                                          \
+       .can_queue              = AHCI_MAX_CMDS - 1,                    \
+       .sg_tablesize           = AHCI_MAX_SG,                          \
+       .dma_boundary           = AHCI_DMA_BOUNDARY,                    \
+       .shost_attrs            = ahci_shost_attrs,                     \
+       .sdev_attrs             = ahci_sdev_attrs
+
 extern struct ata_port_operations ahci_ops;
 
 void ahci_save_initial_config(struct device *dev,
index 4e97f33cca4406212b9769c6c13fc11afc02bc4a..84b643270e7af5c49d8a73db78f6e6230ceabf7b 100644 (file)
 #include <linux/ahci_platform.h>
 #include "ahci.h"
 
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT("ahci_platform"),
+};
+
 static int __init ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        ahci_print_info(host, "platform");
 
        rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
-                              &ahci_sht);
+                              &ahci_platform_sht);
        if (rc)
                goto err0;
 
index 3971bc0a4838235ea26f4bff0b97ec7dd3335b69..d712675d0a9657d3cc7742b74220aaf05d0fa537 100644 (file)
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
index 666850d31df2c304b9d1409e21e20355d52f2a65..8eea309ea21231fcb50ff0498a366ff8a8a1dcf7 100644 (file)
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
                   ahci_read_em_buffer, ahci_store_em_buffer);
 
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_link_power_management_policy,
        &dev_attr_em_message_type,
        &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_em_buffer,
        NULL
 };
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
 
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        &dev_attr_unload_heads,
        NULL
 };
-
-struct scsi_host_template ahci_sht = {
-       ATA_NCQ_SHT("ahci"),
-       .can_queue              = AHCI_MAX_CMDS - 1,
-       .sg_tablesize           = AHCI_MAX_SG,
-       .dma_boundary           = AHCI_DMA_BOUNDARY,
-       .shost_attrs            = ahci_shost_attrs,
-       .sdev_attrs             = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
 
 struct ata_port_operations ahci_ops = {
        .inherits               = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
        /* issue the first D2H Register FIS */
        msecs = 0;
        now = jiffies;
-       if (time_after(now, deadline))
+       if (time_after(deadline, now))
                msecs = jiffies_to_msecs(deadline - now);
 
        tf.ctl |= ATA_SRST;
index c035b3d041ee1572492d7a17f56323bde0fccacd..932eaee5024527f4e617064726e39db808c17e87 100644 (file)
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
+       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+       /*
+        * On some hardware, the device fails to respond after being
+        * spun down for suspend.  As the device won't be used before
+        * being resumed, we don't need to touch it.  Ask EH to skip
+        * the usual stuff and proceed directly to suspend.
+        *
+        * http://thread.gmane.org/gmane.linux.ide/46764
+        */
+       if (mesg.event == PM_EVENT_SUSPEND)
+               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index c9ae299b83428d039c13cce845e7acc497bcff13..e48302eae55fcd2ad619564c9cd8237c722efb29 100644 (file)
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index 3b82d8ef76f0ffc81e6bb62e9d3ea19d1be57195..e30c537cce32f99ab7fcb07f7f124cd920c4cd4b 100644 (file)
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                if (ioaddr->ctl_addr)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
+               ata_wait_idle(ap);
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                iowrite8(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }
+
+       ata_wait_idle(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                     u8 status, int in_wq)
 {
-       struct ata_eh_info *ehi = &ap->link.eh_info;
+       struct ata_link *link = qc->dev->link;
+       struct ata_eh_info *ehi = &link->eh_info;
        unsigned long flags = 0;
        int poll_next;
 
@@ -1298,8 +1302,14 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
 {
+       struct ata_port *ap = link->ap;
+
+       WARN_ON((ap->sff_pio_task_link != NULL) &&
+               (ap->sff_pio_task_link != link));
+       ap->sff_pio_task_link = link;
+
        /* may fail if ata_sff_flush_pio_task() in progress */
        queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
                           msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
 {
        struct ata_port *ap =
                container_of(work, struct ata_port, sff_pio_task.work);
+       struct ata_link *link = ap->sff_pio_task_link;
        struct ata_queued_cmd *qc;
        u8 status;
        int poll_next;
 
+       BUG_ON(ap->sff_pio_task_link == NULL);
        /* qc can be NULL if timeout occurred */
-       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (!qc)
+       qc = ata_qc_from_tag(ap, link->active_tag);
+       if (!qc) {
+               ap->sff_pio_task_link = NULL;
                return;
+       }
 
 fsm_start:
        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
                msleep(2);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+                       ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
                        return;
                }
        }
 
+       /*
+        * hsm_move() may trigger another command to be processed.
+        * clean the link beforehand.
+        */
+       ap->sff_pio_task_link = NULL;
        /* move the HSM */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1376,6 +1395,7 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* Use polling pio if the LLD doesn't handle
         * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                ap->hsm_task_state = HSM_ST_LAST;
 
                if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                break;
 
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* PIO data out protocol */
                        ap->hsm_task_state = HSM_ST_FIRST;
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                        /* always send first data block using the
                         * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                        ap->hsm_task_state = HSM_ST;
 
                        if (qc->tf.flags & ATA_TFLAG_POLLING)
-                               ata_sff_queue_pio_task(ap, 0);
+                               ata_sff_queue_pio_task(link, 0);
 
                        /* if polling, ata_sff_pio_task() handles the
                         * rest.  otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                /* send cdb by polling if no cdb interrupt */
                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
                    (qc->tf.flags & ATA_TFLAG_POLLING))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 
                /* send cdb by polling if no cdb interrupt */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
index ba43f0f8c880a21aa1b8b1d3b9ae3d8c3487d415..2215632e4b317684a818b0ee5cbb7037fe064079 100644 (file)
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
        /* Odd numbered device ids are the units with enable bits (the -R cards) */
-       if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+       if ((pdev->device & 1) &&
+           !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
                return -ENOENT;
 
        return ata_sff_prereset(link, deadline);
index 5e659885de162e7ef3cf38e3e1f8a67098108712..ac8d7d97e4085d4122b33fbd58bde6951f3ef2e7 100644 (file)
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                        tf->lbam,
                        tf->lbah);
        }
+
+       ata_wait_idle(ap);
 }
 
 static int via_port_start(struct ata_port *ap)
index 81982594a014b603aa2ebd8b5944ed13049c929b..a9fd9709c2627c7514fe74a8ec9ebc26f09b44f9 100644 (file)
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
        }
 
        if (qc->tf.flags & ATA_TFLAG_POLLING)
-               ata_sff_queue_pio_task(ap, 0);
+               ata_sff_queue_pio_task(link, 0);
        return 0;
 }
 
index ee9ddeb53417c7da782d252f7d9e4f8113ee44b4..8cb0347dec2848e4d6c33f5276b9ba986a9f3fac 100644 (file)
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
 {  
        struct atm_dev *dev;  
        IADEV *iadev;  
-        unsigned long flags;
        int ret;
 
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
        ia_dev[iadev_count] = iadev;
        _ia_dev[iadev_count] = dev;
        iadev_count++;
-       spin_lock_init(&iadev->misc_lock);
-       /* First fixes first. I don't want to think about this now. */
-       spin_lock_irqsave(&iadev->misc_lock, flags); 
        if (ia_init(dev) || ia_start(dev)) {  
                IF_INIT(printk("IA register failed!\n");)
                iadev_count--;
                ia_dev[iadev_count] = NULL;
                _ia_dev[iadev_count] = NULL;
-               spin_unlock_irqrestore(&iadev->misc_lock, flags); 
                ret = -EINVAL;
                goto err_out_deregister_dev;
        }
-       spin_unlock_irqrestore(&iadev->misc_lock, flags); 
        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
 
        iadev->next_board = ia_boards;  
index b2cd20f549cb4d474edb0a2a9a2f419d40d5072b..077735e0e04bfdd1d12f048ac8006f8a598832f8 100644 (file)
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
        struct dle_q rx_dle_q;  
        struct free_desc_q *rx_free_desc_qhead;  
        struct sk_buff_head rx_dma_q;  
-        spinlock_t rx_lock, misc_lock;
+       spinlock_t rx_lock;
        struct atm_vcc **rx_open;       /* list of all open VCs */  
         u16 num_rx_desc, rx_buf_sz, rxing;
         u32 rx_pkt_ram, rx_tmp_cnt;
index f916ddf63938a03444c2ad6e4d8a6b63c46212aa..f46138ab38b6c310fffc589681f727840a05c646 100644 (file)
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
        struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
        struct solos_card *card = atmdev->dev_data;
        struct sk_buff *skb;
+       unsigned int len;
 
        spin_lock(&card->cli_queue_lock);
        skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
        if(skb == NULL)
                return sprintf(buf, "No data.\n");
 
-       memcpy(buf, skb->data, skb->len);
-       dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
+       len = skb->len;
+       memcpy(buf, skb->data, len);
+       dev_dbg(&card->dev->dev, "len: %d\n", len);
 
        kfree_skb(skb);
-       return skb->len;
+       return len;
 }
 
 static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
index 5419a49ff135121ce608004f40a211a9587b5cf5..276d5a701dc37cfbebc828a88efc824abc163774 100644 (file)
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
        dev->power.status = DPM_ON;
        init_completion(&dev->power.completion);
+       complete_all(&dev->power.completion);
        dev->power.wakeup_count = 0;
        pm_runtime_init(dev);
 }
index 9fc630ce1ddb4b46f93dd2c35a459849399efa8b..f6f37a05a0c3a5664503040bcdfaae32516d6e6c 100644 (file)
@@ -45,7 +45,8 @@ static ssize_t show_##name(struct sys_device *dev,            \
        return sprintf(buf, "%d\n", topology_##name(cpu));      \
 }
 
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
+    defined(topology_book_cpumask)
 static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -114,6 +115,14 @@ define_siblings_show_func(core_cpumask);
 define_one_ro_named(core_siblings, show_core_cpumask);
 define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
+#ifdef CONFIG_SCHED_BOOK
+define_id_show_func(book_id);
+define_one_ro(book_id);
+define_siblings_show_func(book_cpumask);
+define_one_ro_named(book_siblings, show_book_cpumask);
+define_one_ro_named(book_siblings_list, show_book_cpumask_list);
+#endif
+
 static struct attribute *default_attrs[] = {
        &attr_physical_package_id.attr,
        &attr_core_id.attr,
@@ -121,6 +130,11 @@ static struct attribute *default_attrs[] = {
        &attr_thread_siblings_list.attr,
        &attr_core_siblings.attr,
        &attr_core_siblings_list.attr,
+#ifdef CONFIG_SCHED_BOOK
+       &attr_book_id.attr,
+       &attr_book_siblings.attr,
+       &attr_book_siblings_list.attr,
+#endif
        NULL
 };
 
index de277689da6153fac725e54e50f752e7a6ee5978..4b9359a6f6ca45c4cd5f8803b5e7cdb9ba496678 100644 (file)
@@ -488,4 +488,21 @@ config BLK_DEV_HD
 
          If unsure, say N.
 
+config BLK_DEV_RBD
+       tristate "Rados block device (RBD)"
+       depends on INET && EXPERIMENTAL && BLOCK
+       select CEPH_LIB
+       select LIBCRC32C
+       select CRYPTO_AES
+       select CRYPTO
+       default n
+       help
+         Say Y here if you want to include the Rados block device, which stripes
+         a block device over objects stored in the Ceph distributed object
+         store.
+
+         More information at http://ceph.newdream.net/.
+
+         If unsure, say N.
+
 endif # BLK_DEV
index aff5ac925c34332f235e523164f7f85cd76b6751..d7f463d6312d6494c3ef795c5b96ff11fa0ec801 100644 (file)
@@ -37,5 +37,6 @@ obj-$(CONFIG_BLK_DEV_HD)      += hd.o
 
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += xen-blkfront.o
 obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
+obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 
 swim_mod-objs  := swim.o swim_asm.o
index 31064df1370a96320f548d1ce5f191eaf2e97051..5e4fadcdece979b76571462210f60195a6531f27 100644 (file)
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
+       if (h->Qdepth > h->maxQsinceinit)
+               h->maxQsinceinit = h->Qdepth;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
 }
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        h->scatter_list = kmalloc(h->max_commands *
                                                sizeof(struct scatterlist *),
                                                GFP_KERNEL);
+       if (!h->scatter_list)
+               goto clean4;
+
        for (k = 0; k < h->nr_cmds; k++) {
                h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
                                                        h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 clean4:
        kfree(h->cmd_pool_bits);
        /* Free up sg elements */
-       for (k = 0; k < h->nr_cmds; k++)
+       for (k--; k >= 0; k--)
                kfree(h->scatter_list[k]);
        kfree(h->scatter_list);
        cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
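The clean4 fix above adopts the usual partial-allocation unwind idiom: when
allocation fails at index k, only slots 0..k-1 were ever filled, so cleanup
must count down from k-1 instead of walking the full range (which would
kfree() uninitialized pointers). A minimal sketch of the idiom, with all
names invented for illustration:

	#include <linux/slab.h>

	static int example_alloc_all(void **p, int n, size_t sz)
	{
		int k;

		for (k = 0; k < n; k++) {
			p[k] = kmalloc(sz, GFP_KERNEL);
			if (!p[k])
				goto unwind;
		}
		return 0;

	unwind:
		/* k is the first slot that failed; free only what succeeded */
		for (k--; k >= 0; k--)
			kfree(p[k]);
		return -ENOMEM;
	}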
index f3c636d237187df21879c0e8acc4a4c943e59ad2..91797bbbe702f01afc5770f45618487ab21054da 100644 (file)
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+               bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index b82c5ce5e9dfaf3bcc1d81b68554de8cdff0493c..76fa3deaee84059d4431f7763981311210f16f06 100644 (file)
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
        host->breq->queuedata = host;
 
        /* mflash is random device, thanx for the noop */
-       elevator_exit(host->breq->elevator);
-       err = elevator_init(host->breq, "noop");
+       err = elevator_change(host->breq, "noop");
        if (err) {
                printk(KERN_ERR "%s:%d (elevator_init) fail\n",
                                __func__, __LINE__);
index b1cbeb59bb7622e61f75bc58f373cadf1e822218..37a2bb5950761fcfa6cd5ea16f30b5c74dc51a1e 100644 (file)
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
        pkt_shrink_pktlist(pd);
 }
 
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
 {
        if (dev_minor >= MAX_WRITERS)
                return NULL;
index e9da874d04192b125561f4b71d8ba21ce55fadcc..03688c2da319c007f4923c4ffd989e4f9666b755 100644 (file)
@@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
                        memcpy(buf, dev->bounce_buf+offset, size);
                offset += size;
                flush_kernel_dcache_page(bvec->bv_page);
-               bvec_kunmap_irq(bvec, &flags);
+               bvec_kunmap_irq(buf, &flags);
                i++;
        }
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
new file mode 100644 (file)
index 0000000..6ec9d53
--- /dev/null
@@ -0,0 +1,1841 @@
+/*
+   rbd.c -- Export ceph rados objects as a Linux block device
+
+
+   based on drivers/block/osdblk.c:
+
+   Copyright 2009 Red Hat, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING.  If not, write to
+   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+
+   Instructions for use
+   --------------------
+
+   1) Map a Linux block device to an existing rbd image.
+
+      Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
+
+      $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
+
+      The snapshot name can be "-" or omitted to map the image read/write.
+
+   2) List all active blkdev<->object mappings.
+
+      In this example, we have performed step #1 twice, creating two blkdevs,
+      mapped to two separate rados objects in the rados rbd pool.
+
+      $ cat /sys/class/rbd/list
+      #id     major   client_name     pool    name    snap    KB
+      0       254     client4143      rbd     foo     -      1024000
+
+      The columns, in order, are:
+      - blkdev unique id
+      - blkdev assigned major
+      - rados client id
+      - rados pool name
+      - rados block device name
+      - mapped snapshot ("-" if none)
+      - device size in KB
+
+
+   3) Create a snapshot.
+
+      Usage: <blkdev id> <snapname>
+
+      $ echo "0 mysnap" > /sys/class/rbd/snap_create
+
+
+   4) List snapshots.
+
+      $ cat /sys/class/rbd/snaps_list
+      #id     snap    KB
+      0       -       1024000 (*)
+      0       foo     1024000
+
+      The columns, in order, are:
+      - blkdev unique id
+      - snapshot name, '-' means none (active read/write version)
+      - size of device at time of snapshot
+      - the (*) indicates this is the active version
+
+   5) Roll back to a snapshot.
+
+      Usage: <blkdev id> <snapname>
+
+      $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
+
+
+   6) Map an image using a snapshot.
+
+      A snapshot mapping is read-only. This is done by passing
+      snap=<snapname> in the options when adding a device.
+
+      $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
+
+
+   7) Remove an active blkdev<->rbd image mapping.
+
+      In this example, we remove the mapping with blkdev unique id 1.
+
+      $ echo 1 > /sys/class/rbd/remove
+
+
+   NOTE:  The actual creation and deletion of rados objects is outside the scope
+   of this driver.
+
+ */
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/decode.h>
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#include "rbd_types.h"
+
+#define DRV_NAME "rbd"
+#define DRV_NAME_LONG "rbd (rados block device)"
+
+#define RBD_MINORS_PER_MAJOR   256             /* max minors per blkdev */
+
+#define RBD_MAX_MD_NAME_LEN    (96 + sizeof(RBD_SUFFIX))
+#define RBD_MAX_POOL_NAME_LEN  64
+#define RBD_MAX_SNAP_NAME_LEN  32
+#define RBD_MAX_OPT_LEN                1024
+
+#define RBD_SNAP_HEAD_NAME     "-"
+
+#define DEV_NAME_LEN           32
+
+/*
+ * block device image metadata (in-memory version)
+ */
+struct rbd_image_header {
+       u64 image_size;
+       char block_name[32];
+       __u8 obj_order;
+       __u8 crypt_type;
+       __u8 comp_type;
+       struct rw_semaphore snap_rwsem;
+       struct ceph_snap_context *snapc;
+       size_t snap_names_len;
+       u64 snap_seq;
+       u32 total_snaps;
+
+       char *snap_names;
+       u64 *snap_sizes;
+};
+
+/*
+ * an instance of the client.  multiple devices may share a client.
+ */
+struct rbd_client {
+       struct ceph_client      *client;
+       struct kref             kref;
+       struct list_head        node;
+};
+
+/*
+ * a single io request
+ */
+struct rbd_request {
+       struct request          *rq;            /* blk layer request */
+       struct bio              *bio;           /* cloned bio */
+       struct page             **pages;        /* list of used pages */
+       u64                     len;
+};
+
+/*
+ * a single device
+ */
+struct rbd_device {
+       int                     id;             /* blkdev unique id */
+
+       int                     major;          /* blkdev assigned major */
+       struct gendisk          *disk;          /* blkdev's gendisk and rq */
+       struct request_queue    *q;
+
+       struct ceph_client      *client;
+       struct rbd_client       *rbd_client;
+
+       char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
+
+       spinlock_t              lock;           /* queue lock */
+
+       struct rbd_image_header header;
+       char                    obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */
+       int                     obj_len;
+       char                    obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */
+       char                    pool_name[RBD_MAX_POOL_NAME_LEN];
+       int                     poolid;
+
+       char                    snap_name[RBD_MAX_SNAP_NAME_LEN];
+       u32 cur_snap;   /* index+1 of current snapshot within snap context
+                          0 - for the head */
+       int read_only;
+
+       struct list_head        node;
+};
+
+static DEFINE_SPINLOCK(node_lock);      /* protects client get/put */
+
+static struct class *class_rbd;          /* /sys/class/rbd */
+static DEFINE_MUTEX(ctl_mutex);          /* Serialize open/close/setup/teardown */
+static LIST_HEAD(rbd_dev_list);    /* devices */
+static LIST_HEAD(rbd_client_list);      /* clients */
+
+
+static int rbd_open(struct block_device *bdev, fmode_t mode)
+{
+       struct gendisk *disk = bdev->bd_disk;
+       struct rbd_device *rbd_dev = disk->private_data;
+
+       set_device_ro(bdev, rbd_dev->read_only);
+
+       if ((mode & FMODE_WRITE) && rbd_dev->read_only)
+               return -EROFS;
+
+       return 0;
+}
+
+static const struct block_device_operations rbd_bd_ops = {
+       .owner                  = THIS_MODULE,
+       .open                   = rbd_open,
+};
+
+/*
+ * Initialize an rbd client instance.
+ * We own *opt.
+ */
+static struct rbd_client *rbd_client_create(struct ceph_options *opt)
+{
+       struct rbd_client *rbdc;
+       int ret = -ENOMEM;
+
+       dout("rbd_client_create\n");
+       rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
+       if (!rbdc)
+               goto out_opt;
+
+       kref_init(&rbdc->kref);
+       INIT_LIST_HEAD(&rbdc->node);
+
+       rbdc->client = ceph_create_client(opt, rbdc);
+       if (IS_ERR(rbdc->client))
+               goto out_rbdc;
+       opt = NULL; /* Now rbdc->client is responsible for opt */
+
+       ret = ceph_open_session(rbdc->client);
+       if (ret < 0)
+               goto out_err;
+
+       spin_lock(&node_lock);
+       list_add_tail(&rbdc->node, &rbd_client_list);
+       spin_unlock(&node_lock);
+
+       dout("rbd_client_create created %p\n", rbdc);
+       return rbdc;
+
+out_err:
+       ceph_destroy_client(rbdc->client);
+out_rbdc:
+       kfree(rbdc);
+out_opt:
+       if (opt)
+               ceph_destroy_options(opt);
+       return ERR_PTR(ret);
+}
+
+/*
+ * Find a ceph client with specific addr and configuration.
+ */
+static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
+{
+       struct rbd_client *client_node;
+
+       if (opt->flags & CEPH_OPT_NOSHARE)
+               return NULL;
+
+       list_for_each_entry(client_node, &rbd_client_list, node)
+               if (ceph_compare_options(opt, client_node->client) == 0)
+                       return client_node;
+       return NULL;
+}
+
+/*
+ * Get a ceph client with a specific addr and configuration; if one
+ * does not exist, create it.
+ */
+static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
+                         char *options)
+{
+       struct rbd_client *rbdc;
+       struct ceph_options *opt;
+       int ret;
+
+       ret = ceph_parse_options(&opt, options, mon_addr,
+                                mon_addr + strlen(mon_addr), NULL, NULL);
+       if (ret < 0)
+               return ret;
+
+       spin_lock(&node_lock);
+       rbdc = __rbd_client_find(opt);
+       if (rbdc) {
+               ceph_destroy_options(opt);
+
+               /* using an existing client */
+               kref_get(&rbdc->kref);
+               rbd_dev->rbd_client = rbdc;
+               rbd_dev->client = rbdc->client;
+               spin_unlock(&node_lock);
+               return 0;
+       }
+       spin_unlock(&node_lock);
+
+       rbdc = rbd_client_create(opt);
+       if (IS_ERR(rbdc))
+               return PTR_ERR(rbdc);
+
+       rbd_dev->rbd_client = rbdc;
+       rbd_dev->client = rbdc->client;
+       return 0;
+}
+
+/*
+ * Destroy ceph client
+ */
+static void rbd_client_release(struct kref *kref)
+{
+       struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
+
+       dout("rbd_release_client %p\n", rbdc);
+       spin_lock(&node_lock);
+       list_del(&rbdc->node);
+       spin_unlock(&node_lock);
+
+       ceph_destroy_client(rbdc->client);
+       kfree(rbdc);
+}
+
+/*
+ * Drop reference to ceph client node. If it's not referenced anymore, release
+ * it.
+ */
+static void rbd_put_client(struct rbd_device *rbd_dev)
+{
+       kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
+       rbd_dev->rbd_client = NULL;
+       rbd_dev->client = NULL;
+}
+
+
+/*
+ * Create a new header structure, translate header format from the on-disk
+ * header.
+ */
+static int rbd_header_from_disk(struct rbd_image_header *header,
+                                struct rbd_image_header_ondisk *ondisk,
+                                int allocated_snaps,
+                                gfp_t gfp_flags)
+{
+       int i;
+       u32 snap_count = le32_to_cpu(ondisk->snap_count);
+       int ret = -ENOMEM;
+
+       init_rwsem(&header->snap_rwsem);
+
+       header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
+       header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
+                               snap_count *
+                                sizeof(struct rbd_image_snap_ondisk),
+                               gfp_flags);
+       if (!header->snapc)
+               return -ENOMEM;
+       if (snap_count) {
+               header->snap_names = kmalloc(header->snap_names_len,
+                                            GFP_KERNEL);
+               if (!header->snap_names)
+                       goto err_snapc;
+               header->snap_sizes = kmalloc(snap_count * sizeof(u64),
+                                            GFP_KERNEL);
+               if (!header->snap_sizes)
+                       goto err_names;
+       } else {
+               header->snap_names = NULL;
+               header->snap_sizes = NULL;
+       }
+       memcpy(header->block_name, ondisk->block_name,
+              sizeof(ondisk->block_name));
+
+       header->image_size = le64_to_cpu(ondisk->image_size);
+       header->obj_order = ondisk->options.order;
+       header->crypt_type = ondisk->options.crypt_type;
+       header->comp_type = ondisk->options.comp_type;
+
+       atomic_set(&header->snapc->nref, 1);
+       header->snap_seq = le64_to_cpu(ondisk->snap_seq);
+       header->snapc->num_snaps = snap_count;
+       header->total_snaps = snap_count;
+
+       if (snap_count &&
+           allocated_snaps == snap_count) {
+               for (i = 0; i < snap_count; i++) {
+                       header->snapc->snaps[i] =
+                               le64_to_cpu(ondisk->snaps[i].id);
+                       header->snap_sizes[i] =
+                               le64_to_cpu(ondisk->snaps[i].image_size);
+               }
+
+               /* copy snapshot names */
+               memcpy(header->snap_names, &ondisk->snaps[i],
+                       header->snap_names_len);
+       }
+
+       return 0;
+
+err_names:
+       kfree(header->snap_names);
+err_snapc:
+       kfree(header->snapc);
+       return ret;
+}
+
+static int snap_index(struct rbd_image_header *header, int snap_num)
+{
+       return header->total_snaps - snap_num;
+}
+
+static u64 cur_snap_id(struct rbd_device *rbd_dev)
+{
+       struct rbd_image_header *header = &rbd_dev->header;
+
+       if (!rbd_dev->cur_snap)
+               return 0;
+
+       return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
+}
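+/*
+ * Illustration of the inverted indexing above (numbers made up): with
+ * total_snaps = 3, cur_snap = 1 makes snap_index() return 2, i.e. the
+ * last entry of snapc->snaps[]; cur_snap == 0 is reserved for the
+ * writable head and never reaches the array lookup.
+ */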
+
+static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
+                       u64 *seq, u64 *size)
+{
+       int i;
+       char *p = header->snap_names;
+
+       for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
+               if (strcmp(snap_name, p) == 0)
+                       break;
+       }
+       if (i == header->total_snaps)
+               return -ENOENT;
+       if (seq)
+               *seq = header->snapc->snaps[i];
+
+       if (size)
+               *size = header->snap_sizes[i];
+
+       return i;
+}
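+/*
+ * The walk above relies on snap_names holding the names as consecutive
+ * NUL-terminated strings, e.g. "alpha\0beta\0gamma\0" (names made up):
+ * p += strlen(p) + 1 steps from one name to the next, and the loop
+ * index i doubles as the position into snapc->snaps[]/snap_sizes[].
+ */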
+
+static int rbd_header_set_snap(struct rbd_device *dev,
+                              const char *snap_name,
+                              u64 *size)
+{
+       struct rbd_image_header *header = &dev->header;
+       struct ceph_snap_context *snapc = header->snapc;
+       int ret = -ENOENT;
+
+       down_write(&header->snap_rwsem);
+
+       if (!snap_name ||
+           !*snap_name ||
+           strcmp(snap_name, "-") == 0 ||
+           strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) {
+               if (header->total_snaps)
+                       snapc->seq = header->snap_seq;
+               else
+                       snapc->seq = 0;
+               dev->cur_snap = 0;
+               dev->read_only = 0;
+               if (size)
+                       *size = header->image_size;
+       } else {
+               ret = snap_by_name(header, snap_name, &snapc->seq, size);
+               if (ret < 0)
+                       goto done;
+
+               dev->cur_snap = header->total_snaps - ret;
+               dev->read_only = 1;
+       }
+
+       ret = 0;
+done:
+       up_write(&header->snap_rwsem);
+       return ret;
+}
+
+static void rbd_header_free(struct rbd_image_header *header)
+{
+       kfree(header->snapc);
+       kfree(header->snap_names);
+       kfree(header->snap_sizes);
+}
+
+/*
+ * get the actual striped segment name, offset and length
+ */
+static u64 rbd_get_segment(struct rbd_image_header *header,
+                          const char *block_name,
+                          u64 ofs, u64 len,
+                          char *seg_name, u64 *segofs)
+{
+       u64 seg = ofs >> header->obj_order;
+
+       if (seg_name)
+               snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
+                        "%s.%012llx", block_name, seg);
+
+       ofs = ofs & ((1 << header->obj_order) - 1);
+       len = min_t(u64, len, (1 << header->obj_order) - ofs);
+
+       if (segofs)
+               *segofs = ofs;
+
+       return len;
+}
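+/*
+ * Worked example of the striping math above, assuming obj_order = 22
+ * (4 MiB objects; all numbers illustrative): for ofs = 0x3ffc00 and
+ * len = 8K, seg = ofs >> 22 = 0, segofs = 0x3ffc00, and the returned
+ * len is clamped to 1K.  The caller must re-issue the remaining 7K,
+ * which then maps to segment 1 at offset 0.
+ */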
+
+/*
+ * bio helpers
+ */
+
+static void bio_chain_put(struct bio *chain)
+{
+       struct bio *tmp;
+
+       while (chain) {
+               tmp = chain;
+               chain = chain->bi_next;
+               bio_put(tmp);
+       }
+}
+
+/*
+ * zeros a bio chain, starting at specific offset
+ */
+static void zero_bio_chain(struct bio *chain, int start_ofs)
+{
+       struct bio_vec *bv;
+       unsigned long flags;
+       void *buf;
+       int i;
+       int pos = 0;
+
+       while (chain) {
+               bio_for_each_segment(bv, chain, i) {
+                       if (pos + bv->bv_len > start_ofs) {
+                               int remainder = max(start_ofs - pos, 0);
+                               buf = bvec_kmap_irq(bv, &flags);
+                               memset(buf + remainder, 0,
+                                      bv->bv_len - remainder);
+                               bvec_kunmap_irq(buf, &flags);
+                       }
+                       pos += bv->bv_len;
+               }
+
+               chain = chain->bi_next;
+       }
+}
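+/*
+ * Worked example for the offset math above (sizes made up): with
+ * start_ofs = 6K and three 4K segments, bv#0 (pos 0) is untouched,
+ * bv#1 (pos 4K) gets remainder = 2K so only its last 2K are zeroed,
+ * and bv#2 (pos 8K) gets remainder = max(-2K, 0) = 0, zeroing it all.
+ */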
+
+/*
+ * bio_chain_clone - clone a chain of bios up to a certain length.
+ * might return a bio_pair that will need to be released.
+ */
+static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
+                                  struct bio_pair **bp,
+                                  int len, gfp_t gfpmask)
+{
+       struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL;
+       int total = 0;
+
+       if (*bp) {
+               bio_pair_release(*bp);
+               *bp = NULL;
+       }
+
+       while (old_chain && (total < len)) {
+               tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+               if (!tmp)
+                       goto err_out;
+
+               if (total + old_chain->bi_size > len) {
+                       struct bio_pair *bp;
+
+                       /*
+                        * this split can only happen with a single-page bio;
+                        * bio_split() will BUG_ON if this is not the case
+                        */
+                       dout("bio_chain_clone split! total=%d remaining=%d"
+                            "bi_size=%d\n",
+                            (int)total, (int)len-total,
+                            (int)old_chain->bi_size);
+
+                       /* split the bio. We'll release it either in the next
+                          call, or it will have to be released outside */
+                       bp = bio_split(old_chain, (len - total) / 512ULL);
+                       if (!bp)
+                               goto err_out;
+
+                       __bio_clone(tmp, &bp->bio1);
+
+                       *next = &bp->bio2;
+               } else {
+                       __bio_clone(tmp, old_chain);
+                       *next = old_chain->bi_next;
+               }
+
+               tmp->bi_bdev = NULL;
+               gfpmask &= ~__GFP_WAIT;
+               tmp->bi_next = NULL;
+
+               if (!new_chain) {
+                       new_chain = tail = tmp;
+               } else {
+                       tail->bi_next = tmp;
+                       tail = tmp;
+               }
+               old_chain = old_chain->bi_next;
+
+               total += tmp->bi_size;
+       }
+
+       BUG_ON(total < len);
+
+       if (tail)
+               tail->bi_next = NULL;
+
+       *old = old_chain;
+
+       return new_chain;
+
+err_out:
+       dout("bio_chain_clone with err\n");
+       bio_chain_put(new_chain);
+       return NULL;
+}
+
+/*
+ * helpers for osd request op vectors.
+ */
+static int rbd_create_rw_ops(struct ceph_osd_req_op **ops,
+                           int num_ops,
+                           int opcode,
+                           u32 payload_len)
+{
+       *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1),
+                      GFP_NOIO);
+       if (!*ops)
+               return -ENOMEM;
+       (*ops)[0].op = opcode;
+       /*
+        * op extent offset and length will be set later on
+        * in calc_raw_layout()
+        */
+       (*ops)[0].payload_len = payload_len;
+       return 0;
+}
+
+static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
+{
+       kfree(ops);
+}
+
+/*
+ * Send ceph osd request
+ */
+static int rbd_do_request(struct request *rq,
+                         struct rbd_device *dev,
+                         struct ceph_snap_context *snapc,
+                         u64 snapid,
+                         const char *obj, u64 ofs, u64 len,
+                         struct bio *bio,
+                         struct page **pages,
+                         int num_pages,
+                         int flags,
+                         struct ceph_osd_req_op *ops,
+                         int num_reply,
+                         void (*rbd_cb)(struct ceph_osd_request *req,
+                                        struct ceph_msg *msg))
+{
+       struct ceph_osd_request *req;
+       struct ceph_file_layout *layout;
+       int ret;
+       u64 bno;
+       struct timespec mtime = CURRENT_TIME;
+       struct rbd_request *req_data;
+       struct ceph_osd_request_head *reqhead;
+       struct rbd_image_header *header = &dev->header;
+
+       ret = -ENOMEM;
+       req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
+       if (!req_data)
+               goto done;
+
+       dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs);
+
+       down_read(&header->snap_rwsem);
+
+       req = ceph_osdc_alloc_request(&dev->client->osdc, flags,
+                                     snapc,
+                                     ops,
+                                     false,
+                                     GFP_NOIO, pages, bio);
+       if (IS_ERR(req)) {
+               up_read(&header->snap_rwsem);
+               ret = PTR_ERR(req);
+               goto done_pages;
+       }
+
+       req->r_callback = rbd_cb;
+
+       req_data->rq = rq;
+       req_data->bio = bio;
+       req_data->pages = pages;
+       req_data->len = len;
+
+       req->r_priv = req_data;
+
+       reqhead = req->r_request->front.iov_base;
+       reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
+
+       strncpy(req->r_oid, obj, sizeof(req->r_oid));
+       req->r_oid_len = strlen(req->r_oid);
+
+       layout = &req->r_file_layout;
+       memset(layout, 0, sizeof(*layout));
+       layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+       layout->fl_stripe_count = cpu_to_le32(1);
+       layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+       layout->fl_pg_preferred = cpu_to_le32(-1);
+       layout->fl_pg_pool = cpu_to_le32(dev->poolid);
+       ceph_calc_raw_layout(&dev->client->osdc, layout, snapid,
+                            ofs, &len, &bno, req, ops);
+
+       ceph_osdc_build_request(req, ofs, &len,
+                               ops,
+                               snapc,
+                               &mtime,
+                               req->r_oid, req->r_oid_len);
+       up_read(&header->snap_rwsem);
+
+       ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
+       if (ret < 0)
+               goto done_err;
+
+       if (!rbd_cb) {
+               ret = ceph_osdc_wait_request(&dev->client->osdc, req);
+               ceph_osdc_put_request(req);
+       }
+       return ret;
+
+done_err:
+       bio_chain_put(req_data->bio);
+       ceph_osdc_put_request(req);
+done_pages:
+       kfree(req_data);
+done:
+       if (rq)
+               blk_end_request(rq, ret, len);
+       return ret;
+}
+
+/*
+ * Ceph osd op callback
+ */
+static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+{
+       struct rbd_request *req_data = req->r_priv;
+       struct ceph_osd_reply_head *replyhead;
+       struct ceph_osd_op *op;
+       __s32 rc;
+       u64 bytes;
+       int read_op;
+
+       /* parse reply */
+       replyhead = msg->front.iov_base;
+       WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
+       op = (void *)(replyhead + 1);
+       rc = le32_to_cpu(replyhead->result);
+       bytes = le64_to_cpu(op->extent.length);
+       read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+
+       dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
+
+       if (rc == -ENOENT && read_op) {
+               zero_bio_chain(req_data->bio, 0);
+               rc = 0;
+       } else if (rc == 0 && read_op && bytes < req_data->len) {
+               zero_bio_chain(req_data->bio, bytes);
+               bytes = req_data->len;
+       }
+
+       blk_end_request(req_data->rq, rc, bytes);
+
+       if (req_data->bio)
+               bio_chain_put(req_data->bio);
+
+       ceph_osdc_put_request(req);
+       kfree(req_data);
+}
+
+/*
+ * Do a synchronous ceph osd operation
+ */
+static int rbd_req_sync_op(struct rbd_device *dev,
+                          struct ceph_snap_context *snapc,
+                          u64 snapid,
+                          int opcode,
+                          int flags,
+                          struct ceph_osd_req_op *orig_ops,
+                          int num_reply,
+                          const char *obj,
+                          u64 ofs, u64 len,
+                          char *buf)
+{
+       int ret;
+       struct page **pages;
+       int num_pages;
+       struct ceph_osd_req_op *ops = orig_ops;
+       u32 payload_len;
+
+       num_pages = calc_pages_for(ofs, len);
+       pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       if (!orig_ops) {
+               payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0);
+               ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
+               if (ret < 0)
+                       goto done;
+
+               if ((flags & CEPH_OSD_FLAG_WRITE) && buf) {
+                       ret = ceph_copy_to_page_vector(pages, buf, ofs, len);
+                       if (ret < 0)
+                               goto done_ops;
+               }
+       }
+
+       ret = rbd_do_request(NULL, dev, snapc, snapid,
+                         obj, ofs, len, NULL,
+                         pages, num_pages,
+                         flags,
+                         ops,
+                         2,
+                         NULL);
+       if (ret < 0)
+               goto done_ops;
+
+       if ((flags & CEPH_OSD_FLAG_READ) && buf)
+               ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
+
+done_ops:
+       if (!orig_ops)
+               rbd_destroy_ops(ops);
+done:
+       ceph_release_page_vector(pages, num_pages);
+       return ret;
+}
+
+/*
+ * Do an asynchronous ceph osd operation
+ */
+static int rbd_do_op(struct request *rq,
+                    struct rbd_device *rbd_dev,
+                    struct ceph_snap_context *snapc,
+                    u64 snapid,
+                    int opcode, int flags, int num_reply,
+                    u64 ofs, u64 len,
+                    struct bio *bio)
+{
+       char *seg_name;
+       u64 seg_ofs;
+       u64 seg_len;
+       int ret;
+       struct ceph_osd_req_op *ops;
+       u32 payload_len;
+
+       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+       if (!seg_name)
+               return -ENOMEM;
+
+       seg_len = rbd_get_segment(&rbd_dev->header,
+                                 rbd_dev->header.block_name,
+                                 ofs, len,
+                                 seg_name, &seg_ofs);
+
+       payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
+
+       ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
+       if (ret < 0)
+               goto done;
+
+       /* we've taken care of segment sizes earlier when we
+          cloned the bios. We should never have a segment
+          truncated at this point */
+       BUG_ON(seg_len < len);
+
+       ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
+                            seg_name, seg_ofs, seg_len,
+                            bio,
+                            NULL, 0,
+                            flags,
+                            ops,
+                            num_reply,
+                            rbd_req_cb);
+done:
+       kfree(seg_name);
+       return ret;
+}
+
+/*
+ * Request async osd write
+ */
+static int rbd_req_write(struct request *rq,
+                        struct rbd_device *rbd_dev,
+                        struct ceph_snap_context *snapc,
+                        u64 ofs, u64 len,
+                        struct bio *bio)
+{
+       return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
+                        CEPH_OSD_OP_WRITE,
+                        CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+                        2,
+                        ofs, len, bio);
+}
+
+/*
+ * Request async osd read
+ */
+static int rbd_req_read(struct request *rq,
+                        struct rbd_device *rbd_dev,
+                        u64 snapid,
+                        u64 ofs, u64 len,
+                        struct bio *bio)
+{
+       return rbd_do_op(rq, rbd_dev, NULL,
+                        (snapid ? snapid : CEPH_NOSNAP),
+                        CEPH_OSD_OP_READ,
+                        CEPH_OSD_FLAG_READ,
+                        2,
+                        ofs, len, bio);
+}
+
+/*
+ * Request sync osd read
+ */
+static int rbd_req_sync_read(struct rbd_device *dev,
+                         struct ceph_snap_context *snapc,
+                         u64 snapid,
+                         const char *obj,
+                         u64 ofs, u64 len,
+                         char *buf)
+{
+       return rbd_req_sync_op(dev, NULL,
+                              (snapid ? snapid : CEPH_NOSNAP),
+                              CEPH_OSD_OP_READ,
+                              CEPH_OSD_FLAG_READ,
+                              NULL,
+                              1, obj, ofs, len, buf);
+}
+
+/*
+ * Request sync osd rollback of an object to a given snapshot
+ */
+static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
+                                    u64 snapid,
+                                    const char *obj)
+{
+       struct ceph_osd_req_op *ops;
+       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
+       if (ret < 0)
+               return ret;
+
+       ops[0].snap.snapid = snapid;
+
+       ret = rbd_req_sync_op(dev, NULL,
+                              CEPH_NOSNAP,
+                              0,
+                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+                              ops,
+                              1, obj, 0, 0, NULL);
+
+       rbd_destroy_ops(ops);
+
+       return ret;
+}
+
+/*
+ * Request sync osd class method call
+ */
+static int rbd_req_sync_exec(struct rbd_device *dev,
+                            const char *obj,
+                            const char *cls,
+                            const char *method,
+                            const char *data,
+                            int len)
+{
+       struct ceph_osd_req_op *ops;
+       int cls_len = strlen(cls);
+       int method_len = strlen(method);
+       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL,
+                                   cls_len + method_len + len);
+       if (ret < 0)
+               return ret;
+
+       ops[0].cls.class_name = cls;
+       ops[0].cls.class_len = (__u8)cls_len;
+       ops[0].cls.method_name = method;
+       ops[0].cls.method_len = (__u8)method_len;
+       ops[0].cls.argc = 0;
+       ops[0].cls.indata = data;
+       ops[0].cls.indata_len = len;
+
+       ret = rbd_req_sync_op(dev, NULL,
+                              CEPH_NOSNAP,
+                              0,
+                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+                              ops,
+                              1, obj, 0, 0, NULL);
+
+       rbd_destroy_ops(ops);
+
+       dout("cls_exec returned %d\n", ret);
+       return ret;
+}
+
+/*
+ * block device queue callback
+ */
+static void rbd_rq_fn(struct request_queue *q)
+{
+       struct rbd_device *rbd_dev = q->queuedata;
+       struct request *rq;
+       struct bio_pair *bp = NULL;
+
+       rq = blk_fetch_request(q);
+
+       while (1) {
+               struct bio *bio;
+               struct bio *rq_bio, *next_bio = NULL;
+               bool do_write;
+               int size, op_size = 0;
+               u64 ofs;
+
+               /* peek at request from block layer */
+               if (!rq)
+                       break;
+
+               dout("fetched request\n");
+
+               /* filter out block requests we don't understand */
+               if (rq->cmd_type != REQ_TYPE_FS) {
+                       __blk_end_request_all(rq, 0);
+                       goto next;
+               }
+
+               /* deduce our operation (read, write) */
+               do_write = (rq_data_dir(rq) == WRITE);
+
+               size = blk_rq_bytes(rq);
+               ofs = blk_rq_pos(rq) * 512ULL;
+               rq_bio = rq->bio;
+               if (do_write && rbd_dev->read_only) {
+                       __blk_end_request_all(rq, -EROFS);
+                       goto next;
+               }
+
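+               /*
+                * Drop the queue lock while we clone bios and fire off
+                * OSD requests; it is re-taken before ending a request
+                * or fetching the next one.
+                */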
+               spin_unlock_irq(q->queue_lock);
+
+               dout("%s 0x%x bytes at 0x%llx\n",
+                    do_write ? "write" : "read",
+                    size, blk_rq_pos(rq) * 512ULL);
+
+               do {
+                       /* a bio clone to be passed down to OSD req */
+                       dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
+                       op_size = rbd_get_segment(&rbd_dev->header,
+                                                 rbd_dev->header.block_name,
+                                                 ofs, size,
+                                                 NULL, NULL);
+                       bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
+                                             op_size, GFP_ATOMIC);
+                       if (!bio) {
+                               spin_lock_irq(q->queue_lock);
+                               __blk_end_request_all(rq, -ENOMEM);
+                               goto next;
+                       }
+
+                       /* init OSD command: write or read */
+                       if (do_write)
+                               rbd_req_write(rq, rbd_dev,
+                                             rbd_dev->header.snapc,
+                                             ofs,
+                                             op_size, bio);
+                       else
+                               rbd_req_read(rq, rbd_dev,
+                                            cur_snap_id(rbd_dev),
+                                            ofs,
+                                            op_size, bio);
+
+                       size -= op_size;
+                       ofs += op_size;
+
+                       rq_bio = next_bio;
+               } while (size > 0);
+
+               if (bp)
+                       bio_pair_release(bp);
+
+               spin_lock_irq(q->queue_lock);
+next:
+               rq = blk_fetch_request(q);
+       }
+}
+
+/*
+ * A queue callback. Makes sure that we don't create a bio that spans
+ * multiple osd objects. The one exception is single-page bios, which
+ * we handle later in bio_chain_clone().
+ */
+static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
+                         struct bio_vec *bvec)
+{
+       struct rbd_device *rbd_dev = q->queuedata;
+       unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9);
+       sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
+       unsigned int bio_sectors = bmd->bi_size >> 9;
+       int max;
+
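+       /*
+        * Objects are 2^obj_order bytes (chunk_sectors sectors), so the
+        * offset within the current object is a power-of-two mask away;
+        * 'max' is the room left in that object, in bytes.
+        */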
+       max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+                               + bio_sectors)) << 9;
+       if (max < 0)
+               max = 0; /* bio_add cannot handle a negative return */
+       if (max <= bvec->bv_len && bio_sectors == 0)
+               return bvec->bv_len;
+       return max;
+}
+
+static void rbd_free_disk(struct rbd_device *rbd_dev)
+{
+       struct gendisk *disk = rbd_dev->disk;
+
+       if (!disk)
+               return;
+
+       rbd_header_free(&rbd_dev->header);
+
+       if (disk->flags & GENHD_FL_UP)
+               del_gendisk(disk);
+       if (disk->queue)
+               blk_cleanup_queue(disk->queue);
+       put_disk(disk);
+}
+
+/*
+ * reload the on-disk header
+ */
+static int rbd_read_header(struct rbd_device *rbd_dev,
+                          struct rbd_image_header *header)
+{
+       ssize_t rc;
+       struct rbd_image_header_ondisk *dh;
+       int snap_count = 0;
+       u64 snap_names_len = 0;
+
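+       /*
+        * The header size depends on the snapshot count, which we only
+        * learn by reading the header; if more snapshots appeared since
+        * the last read, go around again with the larger size until the
+        * count is stable.
+        */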
+       while (1) {
+               int len = sizeof(*dh) +
+                         snap_count * sizeof(struct rbd_image_snap_ondisk) +
+                         snap_names_len;
+
+               rc = -ENOMEM;
+               dh = kmalloc(len, GFP_KERNEL);
+               if (!dh)
+                       return -ENOMEM;
+
+               rc = rbd_req_sync_read(rbd_dev,
+                                      NULL, CEPH_NOSNAP,
+                                      rbd_dev->obj_md_name,
+                                      0, len,
+                                      (char *)dh);
+               if (rc < 0)
+                       goto out_dh;
+
+               rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
+               if (rc < 0)
+                       goto out_dh;
+
+               if (snap_count != header->total_snaps) {
+                       snap_count = header->total_snaps;
+                       snap_names_len = header->snap_names_len;
+                       rbd_header_free(header);
+                       kfree(dh);
+                       continue;
+               }
+               break;
+       }
+
+out_dh:
+       kfree(dh);
+       return rc;
+}
+
+/*
+ * create a snapshot
+ */
+static int rbd_header_add_snap(struct rbd_device *dev,
+                              const char *snap_name,
+                              gfp_t gfp_flags)
+{
+       int name_len = strlen(snap_name);
+       u64 new_snapid;
+       int ret;
+       void *data, *data_start, *data_end;
+
+       /* we should create a snapshot only if we're pointing at the head */
+       if (dev->cur_snap)
+               return -EINVAL;
+
+       ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid,
+                                     &new_snapid);
+       dout("created snapid=%lld\n", new_snapid);
+       if (ret < 0)
+               return ret;
+
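+       /*
+        * Payload is a length-prefixed name (4 + name_len bytes)
+        * followed by the 64-bit snap id; 16 bytes of headroom covers
+        * both fixed-size parts.
+        */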
+       data = kmalloc(name_len + 16, gfp_flags);
+       if (!data)
+               return -ENOMEM;
+
+       data_start = data;
+       data_end = data + name_len + 16;
+
+       ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad);
+       ceph_encode_64_safe(&data, data_end, new_snapid, bad);
+
+       ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
+                               data_start, data - data_start);
+
+       kfree(data_start);
+
+       if (ret < 0)
+               return ret;
+
+       dev->header.snapc->seq = new_snapid;
+
+       return 0;
+bad:
+       return -ERANGE;
+}
+
+/*
+ * re-read the on-disk header and refresh the in-memory snapshot context
+ */
+static int rbd_update_snaps(struct rbd_device *rbd_dev)
+{
+       int ret;
+       struct rbd_image_header h;
+       u64 snap_seq;
+
+       ret = rbd_read_header(rbd_dev, &h);
+       if (ret < 0)
+               return ret;
+
+       down_write(&rbd_dev->header.snap_rwsem);
+
+       snap_seq = rbd_dev->header.snapc->seq;
+
+       kfree(rbd_dev->header.snapc);
+       kfree(rbd_dev->header.snap_names);
+       kfree(rbd_dev->header.snap_sizes);
+
+       rbd_dev->header.total_snaps = h.total_snaps;
+       rbd_dev->header.snapc = h.snapc;
+       rbd_dev->header.snap_names = h.snap_names;
+       rbd_dev->header.snap_sizes = h.snap_sizes;
+       rbd_dev->header.snapc->seq = snap_seq;
+
+       up_write(&rbd_dev->header.snap_rwsem);
+
+       return 0;
+}
+
+static int rbd_init_disk(struct rbd_device *rbd_dev)
+{
+       struct gendisk *disk;
+       struct request_queue *q;
+       int rc;
+       u64 total_size = 0;
+
+       /* contact OSD, request size info about the object being mapped */
+       rc = rbd_read_header(rbd_dev, &rbd_dev->header);
+       if (rc)
+               return rc;
+
+       rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
+       if (rc)
+               return rc;
+
+       /* create gendisk info */
+       rc = -ENOMEM;
+       disk = alloc_disk(RBD_MINORS_PER_MAJOR);
+       if (!disk)
+               goto out;
+
+       sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id);
+       disk->major = rbd_dev->major;
+       disk->first_minor = 0;
+       disk->fops = &rbd_bd_ops;
+       disk->private_data = rbd_dev;
+
+       /* init rq */
+       rc = -ENOMEM;
+       q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
+       if (!q)
+               goto out_disk;
+       blk_queue_merge_bvec(q, rbd_merge_bvec);
+       disk->queue = q;
+
+       q->queuedata = rbd_dev;
+
+       rbd_dev->disk = disk;
+       rbd_dev->q = q;
+
+       /* finally, announce the disk to the world */
+       set_capacity(disk, total_size / 512ULL);
+       add_disk(disk);
+
+       pr_info("%s: added with size 0x%llx\n",
+               disk->disk_name, (unsigned long long)total_size);
+       return 0;
+
+out_disk:
+       put_disk(disk);
+out:
+       return rc;
+}
+
+/********************************************************************
+ * /sys/class/rbd/
+ *                   add       map rados objects to blkdev
+ *                   remove    unmap rados objects
+ *                   list      show mappings
+ *******************************************************************/
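+/*
+ * For example (monitor address, options, pool and image name are
+ * illustrative only):
+ *
+ *   echo "1.2.3.4:6789 name=admin rbd foo" > /sys/class/rbd/add
+ *   cat /sys/class/rbd/list
+ *   echo 0 > /sys/class/rbd/remove
+ */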
+
+static void class_rbd_release(struct class *cls)
+{
+       kfree(cls);
+}
+
+static ssize_t class_rbd_list(struct class *c,
+                             struct class_attribute *attr,
+                             char *data)
+{
+       int n = 0;
+       struct list_head *tmp;
+       int max = PAGE_SIZE;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       n += snprintf(data, max,
+                     "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
+
+       list_for_each(tmp, &rbd_dev_list) {
+               struct rbd_device *rbd_dev;
+
+               rbd_dev = list_entry(tmp, struct rbd_device, node);
+               n += snprintf(data + n, max - n,
+                             "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
+                             rbd_dev->id,
+                             rbd_dev->major,
+                             ceph_client_id(rbd_dev->client),
+                             rbd_dev->pool_name,
+                             rbd_dev->obj, rbd_dev->snap_name,
+                             rbd_dev->header.image_size >> 10);
+               if (n == max)
+                       break;
+       }
+
+       mutex_unlock(&ctl_mutex);
+       return n;
+}
+
+static ssize_t class_rbd_add(struct class *c,
+                            struct class_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct ceph_osd_client *osdc;
+       struct rbd_device *rbd_dev;
+       ssize_t rc = -ENOMEM;
+       int irc, new_id = 0;
+       struct list_head *tmp;
+       char *mon_dev_name;
+       char *options;
+
+       if (!try_module_get(THIS_MODULE))
+               return -ENODEV;
+
+       mon_dev_name = kmalloc(RBD_MAX_OPT_LEN + 1, GFP_KERNEL);
+       if (!mon_dev_name)
+               goto err_out_mod;
+
+       options = kmalloc(RBD_MAX_OPT_LEN + 1, GFP_KERNEL);
+       if (!options)
+               goto err_mon_dev;
+
+       /* new rbd_device object */
+       rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
+       if (!rbd_dev)
+               goto err_out_opt;
+
+       /* static rbd_device initialization */
+       spin_lock_init(&rbd_dev->lock);
+       INIT_LIST_HEAD(&rbd_dev->node);
+
+       /* generate unique id: find highest unique id, add one */
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       list_for_each(tmp, &rbd_dev_list) {
+               struct rbd_device *rbd_dev;
+
+               rbd_dev = list_entry(tmp, struct rbd_device, node);
+               if (rbd_dev->id >= new_id)
+                       new_id = rbd_dev->id + 1;
+       }
+
+       rbd_dev->id = new_id;
+
+       /* add to global list */
+       list_add_tail(&rbd_dev->node, &rbd_dev_list);
+
+       /* parse add command */
+       if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s "
+                  "%" __stringify(RBD_MAX_OPT_LEN) "s "
+                  "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s "
+                  "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s"
+                  "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+                  mon_dev_name, options, rbd_dev->pool_name,
+                  rbd_dev->obj, rbd_dev->snap_name) < 4) {
+               rc = -EINVAL;
+               goto err_out_slot;
+       }
+
+       if (rbd_dev->snap_name[0] == 0)
+               rbd_dev->snap_name[0] = '-';
+
+       rbd_dev->obj_len = strlen(rbd_dev->obj);
+       snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s",
+                rbd_dev->obj, RBD_SUFFIX);
+
+       /* initialize rest of new object */
+       snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id);
+       rc = rbd_get_client(rbd_dev, mon_dev_name, options);
+       if (rc < 0)
+               goto err_out_slot;
+
+       mutex_unlock(&ctl_mutex);
+
+       /* pick the pool */
+       osdc = &rbd_dev->client->osdc;
+       rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
+       if (rc < 0)
+               goto err_out_client;
+       rbd_dev->poolid = rc;
+
+       /* register our block device */
+       irc = register_blkdev(0, rbd_dev->name);
+       if (irc < 0) {
+               rc = irc;
+               goto err_out_client;
+       }
+       rbd_dev->major = irc;
+
+       /* set up and announce blkdev mapping */
+       rc = rbd_init_disk(rbd_dev);
+       if (rc)
+               goto err_out_blkdev;
+
+       return count;
+
+err_out_blkdev:
+       unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_client:
+       rbd_put_client(rbd_dev);
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+err_out_slot:
+       list_del_init(&rbd_dev->node);
+       mutex_unlock(&ctl_mutex);
+
+       kfree(rbd_dev);
+err_out_opt:
+       kfree(options);
+err_mon_dev:
+       kfree(mon_dev_name);
+err_out_mod:
+       dout("Error adding device %s\n", buf);
+       module_put(THIS_MODULE);
+       return rc;
+}
+
+static struct rbd_device *__rbd_get_dev(unsigned long id)
+{
+       struct list_head *tmp;
+       struct rbd_device *rbd_dev;
+
+       list_for_each(tmp, &rbd_dev_list) {
+               rbd_dev = list_entry(tmp, struct rbd_device, node);
+               if (rbd_dev->id == id)
+                       return rbd_dev;
+       }
+       return NULL;
+}
+
+static ssize_t class_rbd_remove(struct class *c,
+                               struct class_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       struct rbd_device *rbd_dev = NULL;
+       int target_id, rc;
+       unsigned long ul;
+
+       rc = strict_strtoul(buf, 10, &ul);
+       if (rc)
+               return rc;
+
+       /* convert to int; abort if we lost anything in the conversion */
+       target_id = (int) ul;
+       if (target_id != ul)
+               return -EINVAL;
+
+       /* remove object from list immediately */
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       rbd_dev = __rbd_get_dev(target_id);
+       if (rbd_dev)
+               list_del_init(&rbd_dev->node);
+
+       mutex_unlock(&ctl_mutex);
+
+       if (!rbd_dev)
+               return -ENOENT;
+
+       rbd_put_client(rbd_dev);
+
+       /* clean up and free blkdev */
+       rbd_free_disk(rbd_dev);
+       unregister_blkdev(rbd_dev->major, rbd_dev->name);
+       kfree(rbd_dev);
+
+       /* release module ref */
+       module_put(THIS_MODULE);
+
+       return count;
+}
+
+static ssize_t class_rbd_snaps_list(struct class *c,
+                             struct class_attribute *attr,
+                             char *data)
+{
+       struct rbd_device *rbd_dev = NULL;
+       struct list_head *tmp;
+       struct rbd_image_header *header;
+       int i, n = 0, max = PAGE_SIZE;
+       int ret;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       n += snprintf(data, max, "#id\tsnap\tKB\n");
+
+       list_for_each(tmp, &rbd_dev_list) {
+               char *names, *p;
+               struct ceph_snap_context *snapc;
+
+               rbd_dev = list_entry(tmp, struct rbd_device, node);
+               header = &rbd_dev->header;
+
+               down_read(&header->snap_rwsem);
+
+               names = header->snap_names;
+               snapc = header->snapc;
+
+               n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
+                             rbd_dev->id, RBD_SNAP_HEAD_NAME,
+                             header->image_size >> 10,
+                             (!rbd_dev->cur_snap ? " (*)" : ""));
+               if (n == max)
+                       break;
+
+               p = names;
+               for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
+                       n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
+                             rbd_dev->id, p, header->snap_sizes[i] >> 10,
+                             (rbd_dev->cur_snap &&
+                              (snap_index(header, i) == rbd_dev->cur_snap) ?
+                              " (*)" : ""));
+                       if (n == max)
+                               break;
+               }
+
+               up_read(&header->snap_rwsem);
+       }
+
+       ret = n;
+       mutex_unlock(&ctl_mutex);
+       return ret;
+}
+
+static ssize_t class_rbd_snaps_refresh(struct class *c,
+                               struct class_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       struct rbd_device *rbd_dev = NULL;
+       int target_id, rc;
+       unsigned long ul;
+       int ret = count;
+
+       rc = strict_strtoul(buf, 10, &ul);
+       if (rc)
+               return rc;
+
+       /* convert to int; abort if we lost anything in the conversion */
+       target_id = (int) ul;
+       if (target_id != ul)
+               return -EINVAL;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       rbd_dev = __rbd_get_dev(target_id);
+       if (!rbd_dev) {
+               ret = -ENOENT;
+               goto done;
+       }
+
+       rc = rbd_update_snaps(rbd_dev);
+       if (rc < 0)
+               ret = rc;
+
+done:
+       mutex_unlock(&ctl_mutex);
+       return ret;
+}
+
+static ssize_t class_rbd_snap_create(struct class *c,
+                               struct class_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       struct rbd_device *rbd_dev = NULL;
+       int target_id, ret;
+       char *name;
+
+       name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+
+       /* parse snaps add command */
+       if (sscanf(buf, "%d "
+                  "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+                  &target_id,
+                  name) != 2) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       rbd_dev = __rbd_get_dev(target_id);
+       if (!rbd_dev) {
+               ret = -ENOENT;
+               goto done_unlock;
+       }
+
+       ret = rbd_header_add_snap(rbd_dev,
+                                 name, GFP_KERNEL);
+       if (ret < 0)
+               goto done_unlock;
+
+       ret = rbd_update_snaps(rbd_dev);
+       if (ret < 0)
+               goto done_unlock;
+
+       ret = count;
+done_unlock:
+       mutex_unlock(&ctl_mutex);
+done:
+       kfree(name);
+       return ret;
+}
+
+static ssize_t class_rbd_rollback(struct class *c,
+                               struct class_attribute *attr,
+                               const char *buf,
+                               size_t count)
+{
+       struct rbd_device *rbd_dev = NULL;
+       int target_id, ret;
+       u64 snapid;
+       char snap_name[RBD_MAX_SNAP_NAME_LEN + 1];
+       u64 cur_ofs;
+       char *seg_name;
+
+       /* parse snap rollback command */
+       if (sscanf(buf, "%d "
+                  "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+                  &target_id,
+                  snap_name) != 2) {
+               return -EINVAL;
+       }
+
+       ret = -ENOMEM;
+       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+       if (!seg_name)
+               return ret;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       rbd_dev = __rbd_get_dev(target_id);
+       if (!rbd_dev) {
+               ret = -ENOENT;
+               goto done_unlock;
+       }
+
+       ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
+       if (ret < 0)
+               goto done_unlock;
+
+       dout("snapid=%lld\n", snapid);
+
+       cur_ofs = 0;
+       while (cur_ofs < rbd_dev->header.image_size) {
+               cur_ofs += rbd_get_segment(&rbd_dev->header,
+                                          rbd_dev->obj,
+                                          cur_ofs, (u64)-1,
+                                          seg_name, NULL);
+               dout("seg_name=%s\n", seg_name);
+
+               ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
+               if (ret < 0)
+                       pr_warning("could not roll back obj %s err=%d\n",
+                                  seg_name, ret);
+       }
+
+       ret = rbd_update_snaps(rbd_dev);
+       if (ret < 0)
+               goto done_unlock;
+
+       ret = count;
+
+done_unlock:
+       mutex_unlock(&ctl_mutex);
+       kfree(seg_name);
+
+       return ret;
+}
+
+static struct class_attribute class_rbd_attrs[] = {
+       __ATTR(add,             0200, NULL, class_rbd_add),
+       __ATTR(remove,          0200, NULL, class_rbd_remove),
+       __ATTR(list,            0444, class_rbd_list, NULL),
+       __ATTR(snaps_refresh,   0200, NULL, class_rbd_snaps_refresh),
+       __ATTR(snap_create,     0200, NULL, class_rbd_snap_create),
+       __ATTR(snaps_list,      0444, class_rbd_snaps_list, NULL),
+       __ATTR(snap_rollback,   0200, NULL, class_rbd_rollback),
+       __ATTR_NULL
+};
+
+/*
+ * create control files in sysfs
+ * /sys/class/rbd/...
+ */
+static int rbd_sysfs_init(void)
+{
+       int ret = -ENOMEM;
+
+       class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
+       if (!class_rbd)
+               goto out;
+
+       class_rbd->name = DRV_NAME;
+       class_rbd->owner = THIS_MODULE;
+       class_rbd->class_release = class_rbd_release;
+       class_rbd->class_attrs = class_rbd_attrs;
+
+       ret = class_register(class_rbd);
+       if (ret)
+               goto out_class;
+       return 0;
+
+out_class:
+       kfree(class_rbd);
+       class_rbd = NULL;
+       pr_err(DRV_NAME ": failed to create class rbd\n");
+out:
+       return ret;
+}
+
+static void rbd_sysfs_cleanup(void)
+{
+       if (class_rbd)
+               class_destroy(class_rbd);
+       class_rbd = NULL;
+}
+
+int __init rbd_init(void)
+{
+       int rc;
+
+       rc = rbd_sysfs_init();
+       if (rc)
+               return rc;
+       spin_lock_init(&node_lock);
+       pr_info("loaded " DRV_NAME_LONG "\n");
+       return 0;
+}
+
+void __exit rbd_exit(void)
+{
+       rbd_sysfs_cleanup();
+}
+
+module_init(rbd_init);
+module_exit(rbd_exit);
+
+MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
+MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
+MODULE_DESCRIPTION("rados block device");
+
+/* following authorship retained from original osdblk.c */
+MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
new file mode 100644 (file)
index 0000000..fc6c678
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
+
+#ifndef CEPH_RBD_TYPES_H
+#define CEPH_RBD_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * rbd image 'foo' consists of objects
+ *   foo.rbd      - image metadata
+ *   foo.00000000
+ *   foo.00000001
+ *   ...          - data
+ */
+
+#define RBD_SUFFIX             ".rbd"
+#define RBD_DIRECTORY           "rbd_directory"
+#define RBD_INFO                "rbd_info"
+
+#define RBD_DEFAULT_OBJ_ORDER  22   /* 4MB */
+#define RBD_MIN_OBJ_ORDER       16
+#define RBD_MAX_OBJ_ORDER       30
+
+#define RBD_MAX_OBJ_NAME_LEN   96
+#define RBD_MAX_SEG_NAME_LEN   128
+
+#define RBD_COMP_NONE          0
+#define RBD_CRYPT_NONE         0
+
+#define RBD_HEADER_TEXT                "<<< Rados Block Device Image >>>\n"
+#define RBD_HEADER_SIGNATURE   "RBD"
+#define RBD_HEADER_VERSION     "001.005"
+
+struct rbd_info {
+       __le64 max_id;
+} __attribute__ ((packed));
+
+struct rbd_image_snap_ondisk {
+       __le64 id;
+       __le64 image_size;
+} __attribute__((packed));
+
+struct rbd_image_header_ondisk {
+       char text[40];
+       char block_name[24];
+       char signature[4];
+       char version[8];
+       struct {
+               __u8 order;
+               __u8 crypt_type;
+               __u8 comp_type;
+               __u8 unused;
+       } __attribute__((packed)) options;
+       __le64 image_size;
+       __le64 snap_seq;
+       __le32 snap_count;
+       __le32 reserved;
+       __le64 snap_names_len;
+       struct rbd_image_snap_ondisk snaps[0];
+} __attribute__((packed));
+
+
+#endif
index 2aafafca2b1374b11714546fb3c063044ed9200c..8320490226b78145f95c7e7801a15080419adc19 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
-#include <linux/smp_lock.h>
 #include <linux/hdreg.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
@@ -202,6 +201,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
+       int err;
 
        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
@@ -215,11 +215,14 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
        }
 
        req->cmd_type = REQ_TYPE_SPECIAL;
-       return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+       err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+       blk_put_request(req);
+
+       return err;
 }
 
-static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
-                        unsigned cmd, unsigned long data)
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+                            unsigned int cmd, unsigned long data)
 {
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;
@@ -234,18 +237,6 @@ static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
                              (void __user *)data);
 }
 
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
-                            unsigned int cmd, unsigned long param)
-{
-       int ret;
-
-       lock_kernel();
-       ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
-       unlock_kernel();
-
-       return ret;
-}
-
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
index 4b66c69eaf5790d104cf653dd48a14e5eb2940bf..5ddf67e76f8bbccf3242da3bc2e2373d2255648c 100644 (file)
@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
        tristate "AMD Opteron/Athlon64 on-CPU GART support"
-       depends on AGP && X86 && K8_NB
+       depends on AGP && X86 && AMD_NB
        help
          This option gives you AGP support for the GLX component of
          X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
index 70312da4c968f9e4af7c39e9a5a649b4952f8bd3..42396df555567660d597ac524631e9020edfbea3 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/mmzone.h>
 #include <asm/page.h>          /* PAGE_SIZE */
 #include <asm/e820.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/gart.h>
 #include "agp.h"
 
@@ -124,7 +124,7 @@ static int amd64_fetch_size(void)
        u32 temp;
        struct aper_size_info_32 *values;
 
-       dev = k8_northbridges[0];
+       dev = k8_northbridges.nb_misc[0];
        if (dev==NULL)
                return 0;
 
@@ -181,10 +181,14 @@ static int amd_8151_configure(void)
        unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
        int i;
 
+       if (!k8_northbridges.gart_supported)
+               return 0;
+
        /* Configure AGP regs in each x86-64 host bridge. */
-        for (i = 0; i < num_k8_northbridges; i++) {
+       for (i = 0; i < k8_northbridges.num; i++) {
                agp_bridge->gart_bus_addr =
-                               amd64_configure(k8_northbridges[i], gatt_bus);
+                               amd64_configure(k8_northbridges.nb_misc[i],
+                                               gatt_bus);
        }
        k8_flush_garts();
        return 0;
@@ -195,11 +199,15 @@ static void amd64_cleanup(void)
 {
        u32 tmp;
        int i;
-        for (i = 0; i < num_k8_northbridges; i++) {
-               struct pci_dev *dev = k8_northbridges[i];
+
+       if (!k8_northbridges.gart_supported)
+               return;
+
+       for (i = 0; i < k8_northbridges.num; i++) {
+               struct pci_dev *dev = k8_northbridges.nb_misc[i];
                /* disable gart translation */
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
-               tmp &= ~AMD64_GARTEN;
+               tmp &= ~GARTEN;
                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
        }
 }
@@ -313,22 +321,25 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
        if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
                return -1;
 
-       pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
+       gart_set_size_and_enable(nb, order);
        pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
 
        return 0;
 }
 
-static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
+static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
 {
        int i;
 
        if (cache_k8_northbridges() < 0)
                return -ENODEV;
 
+       if (!k8_northbridges.gart_supported)
+               return -ENODEV;
+
        i = 0;
-       for (i = 0; i < num_k8_northbridges; i++) {
-               struct pci_dev *dev = k8_northbridges[i];
+       for (i = 0; i < k8_northbridges.num; i++) {
+               struct pci_dev *dev = k8_northbridges.nb_misc[i];
                if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
                        dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -405,7 +416,8 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
        }
 
        /* shadow x86-64 registers into ULi registers */
-       pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
+       pci_read_config_dword(k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+                              &httfea);
 
        /* if x86-64 aperture base is beyond 4G, exit here */
        if ((httfea & 0x7fff) >> (32 - 25)) {
@@ -472,7 +484,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
        pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
        /* shadow x86-64 registers into NVIDIA registers */
-       pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
+       pci_read_config_dword(k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+                              &apbase);
 
        /* if x86-64 aperture base is beyond 4G, exit here */
        if ( (apbase & 0x7fff) >> (32 - 25) ) {
index d2abf51439836383fd9b03612a44bdf1779448fa..64255cef8a7db93f780c700d712c5fa4b7fe5907 100644 (file)
@@ -984,7 +984,9 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
        bridge->driver->cache_flush();
 #ifdef CONFIG_X86
-       set_memory_uc((unsigned long)table, 1 << page_order);
+       if (set_memory_uc((unsigned long)table, 1 << page_order))
+               printk(KERN_WARNING "Could not set GATT table memory to UC!");
+
        bridge->gatt_table = (void *)table;
 #else
        bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
index eab58db5f91cd9cfd6ffa5302761780ac8b3f373..cd18493c952795317904229197016e40d7c408cd 100644 (file)
@@ -806,6 +806,8 @@ static const struct intel_driver_description {
            "G45/G43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
            "B43", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+           "B43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
            "G41", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
index ee189c74d345ea98062ddbb914c87cb206691c59..d09b1ab7e8abeac5bbdf0cc8a8dd5f333f9c22a3 100644 (file)
 #define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
 #define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
 #define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB        0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG        0x2E92
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
 #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
index 3822b4f49c84a360145a085e329fe0b76df4d47c..7bd7c45b53efc849225f7f0a604c41f2f0a4fded 100644 (file)
@@ -305,6 +305,9 @@ static int num_force_kipmid;
 #ifdef CONFIG_PCI
 static int pci_registered;
 #endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
 #ifdef CONFIG_PPC_OF
 static int of_registered;
 #endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 {
        struct acpi_device *acpi_dev;
        struct smi_info *info;
-       struct resource *res;
+       struct resource *res, *res_second;
        acpi_handle handle;
        acpi_status status;
        unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
        info->io.addr_data = res->start;
 
        info->io.regspacing = DEFAULT_REGSPACING;
-       res = pnp_get_resource(dev,
+       res_second = pnp_get_resource(dev,
                               (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
                                        IORESOURCE_IO : IORESOURCE_MEM,
                               1);
-       if (res) {
-               if (res->start > info->io.addr_data)
-                       info->io.regspacing = res->start - info->io.addr_data;
+       if (res_second) {
+               if (res_second->start > info->io.addr_data)
+                       info->io.regspacing = res_second->start - info->io.addr_data;
        }
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
 
 #ifdef CONFIG_ACPI
        pnp_register_driver(&ipmi_pnp_driver);
+       pnp_registered = 1;
 #endif
 
 #ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
                pci_unregister_driver(&ipmi_pci_driver);
 #endif
 #ifdef CONFIG_ACPI
-       pnp_unregister_driver(&ipmi_pnp_driver);
+       if (pnp_registered)
+               pnp_unregister_driver(&ipmi_pnp_driver);
 #endif
 
 #ifdef CONFIG_PPC_OF
index a398ecdbd758058104e81223f1cbdfe7057e9a4b..1f528fad3516827754270806a4c6afdee505c9d0 100644 (file)
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
 /*
  * capabilities for /dev/zero
  * - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
  */
 static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
-       .capabilities   = BDI_CAP_MAP_COPY,
+       .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static const struct file_operations full_fops = {
index 05ad4a17a28f238ecc197e67051f60a50faad769..7c4133582dbae484f449d49f97b2ee0f179a9891 100644 (file)
@@ -47,6 +47,16 @@ enum tpm_duration {
 #define TPM_MAX_PROTECTED_ORDINAL 12
 #define TPM_PROTECTED_ORDINAL_MASK 0xFF
 
+/*
+ * Bug workaround - some TPMs don't flush the most
+ * recently changed PCR on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile PCR.
+ */
+static int tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+                "PCR to use for dummy writes to faciltate flush on suspend.");
+
 static LIST_HEAD(tpm_chip_list);
 static DEFINE_SPINLOCK(driver_lock);
 static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
@@ -1077,18 +1087,6 @@ static struct tpm_input_header savestate_header = {
        .ordinal = TPM_ORD_SAVESTATE
 };
 
-/* Bug workaround - some TPM's don't flush the most
- * recently changed pcr on suspend, so force the flush
- * with an extend to the selected _unused_ non-volatile pcr.
- */
-static int tpm_suspend_pcr;
-static int __init tpm_suspend_setup(char *str)
-{
-       get_option(&str, &tpm_suspend_pcr);
-       return 1;
-}
-__setup("tpm_suspend_pcr=", tpm_suspend_setup);
-
 /*
  * We are about to suspend. Save the TPM state
  * so that it can be restored.
index 942a9826bd23ed64b83095fcdeb65ed059c9ce02..6c1b676643a9ef43fe3984bb8495996960cf4337 100644 (file)
@@ -48,6 +48,9 @@ struct ports_driver_data {
        /* Used for exporting per-port information to debugfs */
        struct dentry *debugfs_dir;
 
+       /* List of all the devices we're handling */
+       struct list_head portdevs;
+
        /* Number of devices this driver is handling */
        unsigned int index;
 
@@ -108,6 +111,9 @@ struct port_buffer {
  * ports for that device (vdev->priv).
  */
 struct ports_device {
+       /* Next portdev in the list, head is in the pdrvdata struct */
+       struct list_head list;
+
        /*
         * Workqueue handlers where we process deferred work after
         * notification
@@ -178,15 +184,21 @@ struct port {
        struct console cons;
 
        /* Each port associates with a separate char device */
-       struct cdev cdev;
+       struct cdev *cdev;
        struct device *dev;
 
+       /* Reference-counting to handle port hot-unplugs and file operations */
+       struct kref kref;
+
        /* A waitqueue for poll() or blocking read operations */
        wait_queue_head_t waitqueue;
 
        /* The 'name' of the port that we expose via sysfs properties */
        char *name;
 
+       /* We can notify apps of host connect / disconnect events via SIGIO */
+       struct fasync_struct *async_queue;
+
        /* The 'id' to identify the port with the Host */
        u32 id;
 
@@ -221,6 +233,41 @@ out:
        return port;
 }
 
+static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+                                                dev_t dev)
+{
+       struct port *port;
+       unsigned long flags;
+
+       spin_lock_irqsave(&portdev->ports_lock, flags);
+       list_for_each_entry(port, &portdev->ports, list)
+               if (port->cdev->dev == dev)
+                       goto out;
+       port = NULL;
+out:
+       spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+       return port;
+}
+
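+/*
+ * Look up a port by its char device number; minors are handed out
+ * per ports_device, so every registered portdev has to be searched.
+ */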
+static struct port *find_port_by_devt(dev_t dev)
+{
+       struct ports_device *portdev;
+       struct port *port;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdrvdata_lock, flags);
+       list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
+               port = find_port_by_devt_in_portdev(portdev, dev);
+               if (port)
+                       goto out;
+       }
+       port = NULL;
+out:
+       spin_unlock_irqrestore(&pdrvdata_lock, flags);
+       return port;
+}
+
 static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
 {
        struct port *port;
@@ -410,7 +457,10 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 static ssize_t send_control_msg(struct port *port, unsigned int event,
                                unsigned int value)
 {
-       return __send_control_msg(port->portdev, port->id, event, value);
+       /* Did the port get unplugged before userspace closed it? */
+       if (port->portdev)
+               return __send_control_msg(port->portdev, port->id, event, value);
+       return 0;
 }
 
 /* Callers must take the port->outvq_lock */
@@ -459,9 +509,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
 
        /*
         * Wait till the host acknowledges it pushed out the data we
-        * sent.  This is done for ports in blocking mode or for data
-        * from the hvc_console; the tty operations are performed with
-        * spinlocks held so we can't sleep here.
+        * sent.  This is done for data from the hvc_console; the tty
+        * operations are performed with spinlocks held so we can't
+        * sleep here.  An alternative would be to copy the data to a
+        * buffer and relax the spinning requirement.  The downside is
+        * we need to kmalloc a GFP_ATOMIC buffer each time the
+        * console driver writes something out.
         */
        while (!virtqueue_get_buf(out_vq, &len))
                cpu_relax();
@@ -522,6 +575,10 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
 /* The condition that must be true for polling to end */
 static bool will_read_block(struct port *port)
 {
+       if (!port->guest_connected) {
+               /* Port got hot-unplugged. Let's exit. */
+               return false;
+       }
        return !port_has_data(port) && port->host_connected;
 }
 
@@ -572,6 +629,9 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
                if (ret < 0)
                        return ret;
        }
+       /* Port got hot-unplugged. */
+       if (!port->guest_connected)
+               return -ENODEV;
        /*
         * We could've received a disconnection message while we were
         * waiting for more data.
@@ -596,6 +656,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
        ssize_t ret;
        bool nonblock;
 
+       /* Userspace could be out to fool us */
+       if (!count)
+               return 0;
+
        port = filp->private_data;
 
        nonblock = filp->f_flags & O_NONBLOCK;
@@ -609,6 +673,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                if (ret < 0)
                        return ret;
        }
+       /* Port got hot-unplugged. */
+       if (!port->guest_connected)
+               return -ENODEV;
 
        count = min((size_t)(32 * 1024), count);
 
@@ -622,6 +689,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                goto free_buf;
        }
 
+       /*
+        * We now ask send_buf() to not spin for generic ports -- we
+        * can re-use the same code path that non-blocking file
+        * descriptors take for blocking file descriptors since the
+        * wait is already done and we're certain the write will go
+        * through to the host.
+        */
+       nonblock = true;
        ret = send_buf(port, buf, count, nonblock);
 
        if (nonblock && ret > 0)
@@ -641,8 +716,12 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
        port = filp->private_data;
        poll_wait(filp, &port->waitqueue, wait);
 
+       if (!port->guest_connected) {
+               /* Port got unplugged */
+               return POLLHUP;
+       }
        ret = 0;
-       if (port->inbuf)
+       if (!will_read_block(port))
                ret |= POLLIN | POLLRDNORM;
        if (!will_write_block(port))
                ret |= POLLOUT;
@@ -652,6 +731,8 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
        return ret;
 }
 
+static void remove_port(struct kref *kref);
+
 static int port_fops_release(struct inode *inode, struct file *filp)
 {
        struct port *port;
@@ -672,6 +753,16 @@ static int port_fops_release(struct inode *inode, struct file *filp)
        reclaim_consumed_buffers(port);
        spin_unlock_irq(&port->outvq_lock);
 
+       /*
+        * Locks aren't necessary here as a port can't be opened after
+        * unplug, and if a port isn't unplugged, a kref would already
+        * exist for the port.  Plus, taking ports_lock here would
+        * create a dependency on other locks taken by functions
+        * inside remove_port if we're the last holder of the port,
+        * creating many problems.
+        */
+       kref_put(&port->kref, remove_port);
+
        return 0;
 }
 
@@ -679,22 +770,31 @@ static int port_fops_open(struct inode *inode, struct file *filp)
 {
        struct cdev *cdev = inode->i_cdev;
        struct port *port;
+       int ret;
 
-       port = container_of(cdev, struct port, cdev);
+       port = find_port_by_devt(cdev->dev);
        filp->private_data = port;
 
+       /* Guard against the port getting hot-unplugged at the same time */
+       spin_lock_irq(&port->portdev->ports_lock);
+       kref_get(&port->kref);
+       spin_unlock_irq(&port->portdev->ports_lock);
+
        /*
         * Don't allow opening of console port devices -- that's done
         * via /dev/hvc
         */
-       if (is_console_port(port))
-               return -ENXIO;
+       if (is_console_port(port)) {
+               ret = -ENXIO;
+               goto out;
+       }
 
        /* Allow only one process to open a particular port at a time */
        spin_lock_irq(&port->inbuf_lock);
        if (port->guest_connected) {
                spin_unlock_irq(&port->inbuf_lock);
-               return -EMFILE;
+               ret = -EMFILE;
+               goto out;
        }
 
        port->guest_connected = true;
@@ -709,10 +809,23 @@ static int port_fops_open(struct inode *inode, struct file *filp)
        reclaim_consumed_buffers(port);
        spin_unlock_irq(&port->outvq_lock);
 
+       nonseekable_open(inode, filp);
+
        /* Notify host of port being opened */
        send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
 
        return 0;
+out:
+       kref_put(&port->kref, remove_port);
+       return ret;
+}
+
+static int port_fops_fasync(int fd, struct file *filp, int mode)
+{
+       struct port *port;
+
+       port = filp->private_data;
+       return fasync_helper(fd, filp, mode, &port->async_queue);
 }
 
 /*
@@ -728,6 +841,8 @@ static const struct file_operations port_fops = {
        .write = port_fops_write,
        .poll  = port_fops_poll,
        .release = port_fops_release,
+       .fasync = port_fops_fasync,
+       .llseek = no_llseek,
 };
 
 /*
@@ -986,6 +1101,12 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
        return nr_added_bufs;
 }
 
+static void send_sigio_to_port(struct port *port)
+{
+       if (port->async_queue && port->guest_connected)
+               kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
+}
+
 static int add_port(struct ports_device *portdev, u32 id)
 {
        char debugfs_name[16];
@@ -1000,6 +1121,7 @@ static int add_port(struct ports_device *portdev, u32 id)
                err = -ENOMEM;
                goto fail;
        }
+       kref_init(&port->kref);
 
        port->portdev = portdev;
        port->id = id;
@@ -1007,6 +1129,7 @@ static int add_port(struct ports_device *portdev, u32 id)
        port->name = NULL;
        port->inbuf = NULL;
        port->cons.hvc = NULL;
+       port->async_queue = NULL;
 
        port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
 
@@ -1017,14 +1140,20 @@ static int add_port(struct ports_device *portdev, u32 id)
        port->in_vq = portdev->in_vqs[port->id];
        port->out_vq = portdev->out_vqs[port->id];
 
-       cdev_init(&port->cdev, &port_fops);
+       port->cdev = cdev_alloc();
+       if (!port->cdev) {
+               dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
+               err = -ENOMEM;
+               goto free_port;
+       }
+       port->cdev->ops = &port_fops;
 
        devt = MKDEV(portdev->chr_major, id);
-       err = cdev_add(&port->cdev, devt, 1);
+       err = cdev_add(port->cdev, devt, 1);
        if (err < 0) {
                dev_err(&port->portdev->vdev->dev,
                        "Error %d adding cdev for port %u\n", err, id);
-               goto free_port;
+               goto free_cdev;
        }
        port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
                                  devt, port, "vport%up%u",
@@ -1089,7 +1218,7 @@ free_inbufs:
 free_device:
        device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
-       cdev_del(&port->cdev);
+       cdev_del(port->cdev);
 free_port:
        kfree(port);
 fail:
@@ -1098,21 +1227,45 @@ fail:
        return err;
 }
 
-/* Remove all port-specific data. */
-static int remove_port(struct port *port)
+/* No users remain, remove all port-specific data. */
+static void remove_port(struct kref *kref)
+{
+       struct port *port;
+
+       port = container_of(kref, struct port, kref);
+
+       sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+       device_destroy(pdrvdata.class, port->dev->devt);
+       cdev_del(port->cdev);
+
+       kfree(port->name);
+
+       debugfs_remove(port->debugfs_file);
+
+       kfree(port);
+}
+
+/*
+ * Port got unplugged.  Remove port from portdev's list and drop the
+ * kref reference.  If no userspace has this port opened, it will
+ * result in the immediate removal of the port.
+ */
+static void unplug_port(struct port *port)
 {
        struct port_buffer *buf;
 
+       spin_lock_irq(&port->portdev->ports_lock);
+       list_del(&port->list);
+       spin_unlock_irq(&port->portdev->ports_lock);
+
        if (port->guest_connected) {
                port->guest_connected = false;
                port->host_connected = false;
                wake_up_interruptible(&port->waitqueue);
-               send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
-       }
 
-       spin_lock_irq(&port->portdev->ports_lock);
-       list_del(&port->list);
-       spin_unlock_irq(&port->portdev->ports_lock);
+               /* Let the app know the port is going down. */
+               send_sigio_to_port(port);
+       }
 
        if (is_console_port(port)) {
                spin_lock_irq(&pdrvdata_lock);
@@ -1131,9 +1284,6 @@ static int remove_port(struct port *port)
                hvc_remove(port->cons.hvc);
 #endif
        }
-       sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
-       device_destroy(pdrvdata.class, port->dev->devt);
-       cdev_del(&port->cdev);
 
        /* Remove unused data this port might have received. */
        discard_port_data(port);
@@ -1144,12 +1294,19 @@ static int remove_port(struct port *port)
        while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
                free_buf(buf);
 
-       kfree(port->name);
-
-       debugfs_remove(port->debugfs_file);
+       /*
+        * We should just assume the device itself has gone off --
+        * else a close on an open port later will try to send out a
+        * control message.
+        */
+       port->portdev = NULL;
 
-       kfree(port);
-       return 0;
+       /*
+        * Locks around here are not necessary - a port can't be
+        * opened after we removed the port struct from ports_list
+        * above.
+        */
+       kref_put(&port->kref, remove_port);
 }
 
 /* Any private messages that the Host and Guest want to share */
@@ -1188,7 +1345,7 @@ static void handle_control_message(struct ports_device *portdev,
                add_port(portdev, cpkt->id);
                break;
        case VIRTIO_CONSOLE_PORT_REMOVE:
-               remove_port(port);
+               unplug_port(port);
                break;
        case VIRTIO_CONSOLE_CONSOLE_PORT:
                if (!cpkt->value)
@@ -1230,6 +1387,12 @@ static void handle_control_message(struct ports_device *portdev,
                spin_lock_irq(&port->outvq_lock);
                reclaim_consumed_buffers(port);
                spin_unlock_irq(&port->outvq_lock);
+
+               /*
+                * If the guest is connected, it'll be interested in
+                * knowing the host connection state changed.
+                */
+               send_sigio_to_port(port);
                break;
        case VIRTIO_CONSOLE_PORT_NAME:
                /*
@@ -1326,6 +1489,9 @@ static void in_intr(struct virtqueue *vq)
 
        wake_up_interruptible(&port->waitqueue);
 
+       /* Send a SIGIO indicating new data in case the process asked for it */
+       send_sigio_to_port(port);
+
        if (is_console_port(port) && hvc_poll(port->cons.hvc))
                hvc_kick();
 }
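The new send_sigio_to_port() call sites (unplug, host connection change, new input data) all assume a port-level fasync queue. A plausible shape for the helper, as a sketch only -- the async_queue member and the readiness band are assumptions, since the helper's body is not part of this excerpt:

	static void send_sigio_to_port(struct port *port)
	{
		/* Only signal processes that requested FASYNC on the fd. */
		if (port->async_queue && port->guest_connected)
			kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
	}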
@@ -1562,6 +1728,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
                add_port(portdev, 0);
        }
 
+       spin_lock_irq(&pdrvdata_lock);
+       list_add_tail(&portdev->list, &pdrvdata.portdevs);
+       spin_unlock_irq(&pdrvdata_lock);
+
        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
                           VIRTIO_CONSOLE_DEVICE_READY, 1);
        return 0;
@@ -1585,23 +1755,41 @@ static void virtcons_remove(struct virtio_device *vdev)
 {
        struct ports_device *portdev;
        struct port *port, *port2;
-       struct port_buffer *buf;
-       unsigned int len;
 
        portdev = vdev->priv;
 
+       spin_lock_irq(&pdrvdata_lock);
+       list_del(&portdev->list);
+       spin_unlock_irq(&pdrvdata_lock);
+
+       /* Disable interrupts for vqs */
+       vdev->config->reset(vdev);
+       /* Finish up work that's lined up */
        cancel_work_sync(&portdev->control_work);
 
        list_for_each_entry_safe(port, port2, &portdev->ports, list)
-               remove_port(port);
+               unplug_port(port);
 
        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 
-       while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-               free_buf(buf);
+       /*
+        * When yanking out a device, we immediately lose the
+        * (device-side) queues.  So there's no point in keeping the
+        * guest side around till we drop our final reference.  This
+        * also means that userspace must stop using any port it
+        * still has open, as the vqs are going away.
+        */
+       if (use_multiport(portdev)) {
+               struct port_buffer *buf;
+               unsigned int len;
 
-       while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-               free_buf(buf);
+               while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+                       free_buf(buf);
+
+               while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+                       free_buf(buf);
+       }
 
        vdev->config->del_vqs(vdev);
        kfree(portdev->in_vqs);
@@ -1648,6 +1836,7 @@ static int __init init(void)
                           PTR_ERR(pdrvdata.debugfs_dir));
        }
        INIT_LIST_HEAD(&pdrvdata.consoles);
+       INIT_LIST_HEAD(&pdrvdata.portdevs);
 
        return register_virtio_driver(&virtio_console);
 }
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic; between 2.6.12
+                * and 2.6.36 this was the platform-dependent
+                * CLOCK_TICK_RATE, which made for a minor but
+                * unfortunate ABI change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
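Both conversions turn a frequency in Hz into a divisor for the i8253 PIT, whose input clock is the fixed 1193182 Hz that PIT_TICK_RATE denotes. A worked example of the KIOCSOUND path:

	/* Illustration only: frequency (Hz) -> PIT counter value. */
	unsigned int freq    = 440;		/* concert A */
	unsigned int divisor = 1193182 / freq;	/* == 2711   */

With CLOCK_TICK_RATE the divisor varied by platform, so the same ioctl argument could produce different pitches -- the ABI wart the new comment records.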
index c2408bbe9c2eed3521f4eb21a86c1e9671774534..f508690eb95859ef80e217f68db827daba606f29 100644 (file)
@@ -80,7 +80,7 @@
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
  * and in addition, less performance has a power price of its own.
  *
  * As a general rule of thumb, menu assumes that the following heuristic
index 8661c84a105d86751e1990636f6dbcdeb4962cf3..b98c67664ae72b7638bdf06365fd0789eb76245e 100644 (file)
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
        kfree(domain);
 }
 
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+               ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+               (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+       struct dca_provider *dca, *_dca;
+       struct list_head unregistered_providers;
+       struct dca_domain *domain;
+       unsigned long flags;
+
+       blocking_notifier_call_chain(&dca_provider_chain,
+                                    DCA_PROVIDER_REMOVE, NULL);
+
+       INIT_LIST_HEAD(&unregistered_providers);
+
+       spin_lock_irqsave(&dca_lock, flags);
+
+       if (list_empty(&dca_domains)) {
+               spin_unlock_irqrestore(&dca_lock, flags);
+               return;
+       }
+
+       /* at this point only one domain in the list is expected */
+       domain = list_first_entry(&dca_domains, struct dca_domain, node);
+       if (!domain)
+               return;
+
+       list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+               list_del(&dca->node);
+               list_add(&dca->node, &unregistered_providers);
+       }
+
+       dca_free_domain(domain);
+
+       spin_unlock_irqrestore(&dca_lock, flags);
+
+       list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+               dca_sysfs_remove_provider(dca);
+               list_del(&dca->node);
+       }
+}
+
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 {
        struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
        domain = dca_find_domain(rc);
 
        if (!domain) {
-               domain = dca_allocate_domain(rc);
-               if (domain)
-                       list_add(&domain->node, &dca_domains);
+               if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+                       dca_providers_blocked = 1;
+               } else {
+                       domain = dca_allocate_domain(rc);
+                       if (domain)
+                               list_add(&domain->node, &dca_domains);
+               }
        }
 
        return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
 }
 EXPORT_SYMBOL_GPL(free_dca_provider);
 
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
 /**
  * register_dca_provider - register a dca provider
  * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
        unsigned long flags;
        struct dca_domain *domain;
 
+       spin_lock_irqsave(&dca_lock, flags);
+       if (dca_providers_blocked) {
+               spin_unlock_irqrestore(&dca_lock, flags);
+               return -ENODEV;
+       }
+       spin_unlock_irqrestore(&dca_lock, flags);
+
        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
        spin_lock_irqsave(&dca_lock, flags);
        domain = dca_get_domain(dev);
        if (!domain) {
-               spin_unlock_irqrestore(&dca_lock, flags);
+               if (dca_providers_blocked) {
+                       spin_unlock_irqrestore(&dca_lock, flags);
+                       dca_sysfs_remove_provider(dca);
+                       unregister_dca_providers();
+               } else {
+                       spin_unlock_irqrestore(&dca_lock, flags);
+               }
                return -ENODEV;
        }
        list_add(&dca->node, &domain->dca_providers);
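The net effect of the blocking logic: on platforms with more than one root complex, the first IOAT ver. 3.0 provider that would create a second DCA domain sets dca_providers_blocked, fails its own registration, and tears down every provider registered so far. A sketch of how a driver is expected to cope with the -ENODEV result (the function name here is illustrative):

	static void example_setup_dca(struct dca_provider *dca, struct device *dev)
	{
		if (register_dca_provider(dca, dev)) {
			/* DCA blocked (e.g. multi-IOH + IOAT 3.0); run without it. */
			free_dca_provider(dca);
		}
	}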
index 216f9d383b5b7b1b0a4d2062c388518a7270de55..effd140fc042b827617bebce013dae190c7b4d76 100644 (file)
@@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
-       dma->device_tx_status = ioat_tx_status;
+       dma->device_tx_status = ioat_dma_tx_status;
 
        err = ioat_probe(device);
        if (err)
index 86c5ae9fde34d3cf0f3f372a33739ccb4286fdf3..411d5bf50fc43cab437dff34d3d9f25dd9928fe0 100644 (file)
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-       u32 val = (1 << (1 + (chan->idx * 16)));
+       u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
 }
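The old expression set a single bit one position too high instead of building a clear mask. Assuming the cause register is write-zero-to-clear, the worked values for channel 1 are:

	u32 bad   = (1 << (1 + (1 * 16)));	/* 0x00020000: sets the wrong bit */
	u32 fixed = ~(1 << (1 * 16));		/* 0xfffeffff: clears only bit 16 */

so the fix leaves every other cause bit untouched and acknowledges exactly the channel's EOC cause bit.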
index fb64cf36ba61d0e786ecfeb802f43909ade4f2f2..eb6b54dbb8064a9a5d2e71eb3261132195ff6f8d 100644 (file)
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 
        sh_chan = to_sh_chan(chan);
        param = chan->private;
-       slave_addr = param->config->addr;
 
        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
                return NULL;
        }
 
+       slave_addr = param->config->addr;
+
        /*
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
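The change is a plain check-before-use reordering: param comes from chan->private and may be NULL for a public channel, so param->config must not be touched until after the NULL test. The bug pattern in miniature, with hypothetical names:

	struct cfg *c = obj->private;	/* may be NULL              */
	u32 addr = c->addr;		/* BUG: dereferenced early  */
	if (!c)
		return NULL;		/* the check comes too late */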
index 70bb350de9964a813b6e4627e6c4a709e23ff930..9dbb28b9559f7d794dd7f6899a5ff52056ce4017 100644 (file)
@@ -39,7 +39,7 @@ config EDAC_DEBUG
          there're four debug levels (x=0,1,2,3 from low to high).
          Usually you should select 'N'.
 
- config EDAC_DECODE_MCE
+config EDAC_DECODE_MCE
        tristate "Decode MCEs in human-readable form (only on AMD for now)"
        depends on CPU_SUP_AMD && X86_MCE
        default y
@@ -51,6 +51,16 @@ config EDAC_DEBUG
          which occur really early upon boot, before the module infrastructure
          has been initialized.
 
+config EDAC_MCE_INJ
+       tristate "Simple MCE injection interface over /sysfs"
+       depends on EDAC_DECODE_MCE
+       default n
+       help
+         This is a simple interface to inject MCEs over /sysfs and test
+         the MCE decoding code in EDAC.
+
+         This is currently AMD-only.
+
 config EDAC_MM_EDAC
        tristate "Main Memory EDAC (Error Detection And Correction) reporting"
        help
@@ -66,13 +76,13 @@ config EDAC_MCE
 
 config EDAC_AMD64
        tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
-       depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
+       depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
        help
          Support for error detection and correction on the AMD 64
          Families of Memory Controllers (K8, F10h and F11h)
 
 config EDAC_AMD64_ERROR_INJECTION
-       bool "Sysfs Error Injection facilities"
+       bool "Sysfs HW Error injection facilities"
        depends on EDAC_AMD64
        help
          Recent Opterons (Family 10h and later) provide for Memory Error
index ca6b1bb24ccc8e76109b13f38e1237eec98ec09e..32c7bc93c525ef1613d1c116b14b0e1a0e0f4f8a 100644 (file)
@@ -17,6 +17,9 @@ ifdef CONFIG_PCI
 edac_core-objs += edac_pci.o edac_pci_sysfs.o
 endif
 
+obj-$(CONFIG_EDAC_MCE_INJ)             += mce_amd_inj.o
+
+edac_mce_amd-objs                      := mce_amd.o
 obj-$(CONFIG_EDAC_DECODE_MCE)          += edac_mce_amd.o
 
 obj-$(CONFIG_EDAC_AMD76X)              += amd76x_edac.o
index e7d5d6b5dcf69683d5ac7c59d6608643c5ae4e53..8521401bbd751406e6f2752e10868f63c7e3b48a 100644 (file)
@@ -1,5 +1,5 @@
 #include "amd64_edac.h"
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 static struct edac_pci_ctl_info *amd64_ctl_pci;
 
@@ -2073,11 +2073,18 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                amd64_handle_ue(mci, info);
 }
 
-void amd64_decode_bus_error(int node_id, struct err_regs *regs)
+void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 {
        struct mem_ctl_info *mci = mci_lookup[node_id];
+       struct err_regs regs;
 
-       __amd64_decode_bus_error(mci, regs);
+       regs.nbsl  = (u32) m->status;
+       regs.nbsh  = (u32)(m->status >> 32);
+       regs.nbeal = (u32) m->addr;
+       regs.nbeah = (u32)(m->addr >> 32);
+       regs.nbcfg = nbcfg;
+
+       __amd64_decode_bus_error(mci, &regs);
 
        /*
         * Check the UE bit of the NB status high register, if set generate some
@@ -2086,7 +2093,7 @@ void amd64_decode_bus_error(int node_id, struct err_regs *regs)
         *
         * FIXME: this should go somewhere else, if at all.
         */
-       if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
+       if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
                edac_mc_handle_ue_no_info(mci, "UE bit is set");
 
 }
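The decoder now takes the raw struct mce and rebuilds the legacy err_regs view locally; MC4_STATUS and MC4_ADDR are 64-bit values carved into low/high 32-bit halves, exactly as the hunk shows:

	u32 lo = (u32) m->status;		/* bits 31:0  -> nbsl */
	u32 hi = (u32)(m->status >> 32);	/* bits 63:32 -> nbsh */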
@@ -2927,7 +2934,7 @@ static int __init amd64_edac_init(void)
         * to finish initialization of the MC instances.
         */
        err = -ENODEV;
-       for (nb = 0; nb < num_k8_northbridges; nb++) {
+       for (nb = 0; nb < k8_northbridges.num; nb++) {
                if (!pvt_lookup[nb])
                        continue;
 
index 613b9381e71a658e76fd312beb0e1d05e5f10616..044aee4f944da6ae9a709584ae243c3e97c151a9 100644 (file)
@@ -72,7 +72,7 @@
 #include <linux/edac.h>
 #include <asm/msr.h>
 #include "edac_core.h"
-#include "edac_mce_amd.h"
+#include "mce_amd.h"
 
 #define amd64_printk(level, fmt, arg...) \
        edac_printk(level, "amd64", fmt, ##arg)
@@ -482,11 +482,10 @@ extern const char *rrrr_msgs[16];
 extern const char *to_msgs[2];
 extern const char *pp_msgs[4];
 extern const char *ii_msgs[4];
-extern const char *ext_msgs[32];
 extern const char *htlink_msgs[8];
 
 #ifdef CONFIG_EDAC_DEBUG
-#define NUM_DBG_ATTRS 9
+#define NUM_DBG_ATTRS 5
 #else
 #define NUM_DBG_ATTRS 0
 #endif
index 59cf2cf6e11ec3fc9628b5f6b32aa525ae3a80cf..e3562288f4ce80589ab0680f4fc179335da80822 100644 (file)
 #include "amd64_edac.h"
 
-/*
- * accept a hex value and store it into the virtual error register file, field:
- * nbeal and nbeah. Assume virtual error values have already been set for: NBSL,
- * NBSH and NBCFG. Then proceed to map the error values to a MC, CSROW and
- * CHANNEL
- */
-static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
-                               size_t count)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       unsigned long long value;
-       int ret = 0;
-
-       ret = strict_strtoull(data, 16, &value);
-       if (ret != -EINVAL) {
-               debugf0("received NBEA= 0x%llx\n", value);
-
-               /* place the value into the virtual error packet */
-               pvt->ctl_error_info.nbeal = (u32) value;
-               value >>= 32;
-               pvt->ctl_error_info.nbeah = (u32) value;
-
-               /* Process the Mapping request */
-               /* TODO: Add race prevention */
-               amd_decode_nb_mce(pvt->mc_node_id, &pvt->ctl_error_info, 1);
-
-               return count;
-       }
-       return ret;
-}
-
-/* display back what the last NBEA (MCA NB Address (MC4_ADDR)) was written */
-static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u64 value;
-
-       value = pvt->ctl_error_info.nbeah;
-       value <<= 32;
-       value |= pvt->ctl_error_info.nbeal;
-
-       return sprintf(data, "%llx\n", value);
-}
-
-/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value user desires */
-static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
-                               size_t count)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       unsigned long value;
-       int ret = 0;
-
-       ret = strict_strtoul(data, 16, &value);
-       if (ret != -EINVAL) {
-               debugf0("received NBSL= 0x%lx\n", value);
-
-               pvt->ctl_error_info.nbsl = (u32) value;
-
-               return count;
-       }
-       return ret;
-}
-
-/* display back what the last NBSL value written */
-static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u32 value;
-
-       value = pvt->ctl_error_info.nbsl;
-
-       return sprintf(data, "%x\n", value);
-}
-
-/* store the NBSH (MCA NB Status High) value user desires */
-static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
-                               size_t count)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       unsigned long value;
-       int ret = 0;
-
-       ret = strict_strtoul(data, 16, &value);
-       if (ret != -EINVAL) {
-               debugf0("received NBSH= 0x%lx\n", value);
-
-               pvt->ctl_error_info.nbsh = (u32) value;
-
-               return count;
-       }
-       return ret;
-}
-
-/* display back what the last NBSH value written */
-static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u32 value;
-
-       value = pvt->ctl_error_info.nbsh;
-
-       return sprintf(data, "%x\n", value);
+#define EDAC_DCT_ATTR_SHOW(reg)                                                \
+static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data)        \
+{                                                                      \
+       struct amd64_pvt *pvt = mci->pvt_info;                          \
+       return sprintf(data, "0x%016llx\n", (u64)pvt->reg);             \
 }
 
-/* accept and store the NBCFG (MCA NB Configuration) value user desires */
-static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
-                                       const char *data, size_t count)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-       unsigned long value;
-       int ret = 0;
-
-       ret = strict_strtoul(data, 16, &value);
-       if (ret != -EINVAL) {
-               debugf0("received NBCFG= 0x%lx\n", value);
-
-               pvt->ctl_error_info.nbcfg = (u32) value;
-
-               return count;
-       }
-       return ret;
-}
-
-/* various show routines for the controls of a MCI */
-static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-
-       return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
-}
-
-
-static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-
-       return sprintf(data, "%x\n", pvt->dhar);
-}
-
-
-static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-
-       return sprintf(data, "%x\n", pvt->dbam0);
-}
-
-
-static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-
-       return sprintf(data, "%llx\n", pvt->top_mem);
-}
-
-
-static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
-{
-       struct amd64_pvt *pvt = mci->pvt_info;
-
-       return sprintf(data, "%llx\n", pvt->top_mem2);
-}
+EDAC_DCT_ATTR_SHOW(dhar);
+EDAC_DCT_ATTR_SHOW(dbam0);
+EDAC_DCT_ATTR_SHOW(top_mem);
+EDAC_DCT_ATTR_SHOW(top_mem2);
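For clarity, each invocation stamps out one show routine; EDAC_DCT_ATTR_SHOW(dhar), for instance, expands to roughly:

	static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
	{
		struct amd64_pvt *pvt = mci->pvt_info;
		return sprintf(data, "0x%016llx\n", (u64)pvt->dhar);
	}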
 
 static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
 {
@@ -180,38 +29,6 @@ static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
  */
 struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
 
-       {
-               .attr = {
-                       .name = "nbea_ctl",
-                       .mode = (S_IRUGO | S_IWUSR)
-               },
-               .show = amd64_nbea_show,
-               .store = amd64_nbea_store,
-       },
-       {
-               .attr = {
-                       .name = "nbsl_ctl",
-                       .mode = (S_IRUGO | S_IWUSR)
-               },
-               .show = amd64_nbsl_show,
-               .store = amd64_nbsl_store,
-       },
-       {
-               .attr = {
-                       .name = "nbsh_ctl",
-                       .mode = (S_IRUGO | S_IWUSR)
-               },
-               .show = amd64_nbsh_show,
-               .store = amd64_nbsh_store,
-       },
-       {
-               .attr = {
-                       .name = "nbcfg_ctl",
-                       .mode = (S_IRUGO | S_IWUSR)
-               },
-               .show = amd64_nbcfg_show,
-               .store = amd64_nbcfg_store,
-       },
        {
                .attr = {
                        .name = "dhar",
@@ -225,7 +42,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
                        .name = "dbam",
                        .mode = (S_IRUGO)
                },
-               .show = amd64_dbam_show,
+               .show = amd64_dbam0_show,
                .store = NULL,
        },
        {
@@ -233,7 +50,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
                        .name = "topmem",
                        .mode = (S_IRUGO)
                },
-               .show = amd64_topmem_show,
+               .show = amd64_top_mem_show,
                .store = NULL,
        },
        {
@@ -241,7 +58,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
                        .name = "topmem2",
                        .mode = (S_IRUGO)
                },
-               .show = amd64_topmem2_show,
+               .show = amd64_top_mem2_show,
                .store = NULL,
        },
        {
index 070968178a24a80703708dd50ba363301ad71aac..2941dca91aae3f949a04a29b02f32eed5a107bb8 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 
 #include "edac_core.h"
 #include "edac_module.h"
@@ -235,7 +236,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
        debugf1("%s()\n", __func__);
 
        /* get the /sys/devices/system/edac reference */
-       edac_class = edac_get_edac_class();
+       edac_class = edac_get_sysfs_class();
        if (edac_class == NULL) {
                debugf1("%s() no edac_class error\n", __func__);
                err = -ENODEV;
@@ -255,7 +256,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
 
        if (!try_module_get(edac_dev->owner)) {
                err = -ENODEV;
-               goto err_out;
+               goto err_mod_get;
        }
 
        /* register */
@@ -282,6 +283,9 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
 err_kobj_reg:
        module_put(edac_dev->owner);
 
+err_mod_get:
+       edac_put_sysfs_class();
+
 err_out:
        return err;
 }
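The added err_mod_get label keeps the goto unwind symmetric: each failure jumps to the label that releases everything acquired so far, in reverse order. The general shape of the idiom, with hypothetical acquire/release helpers standing in for the real calls:

	err = acquire_a();		/* here: edac_get_sysfs_class() */
	if (err)
		goto err_a;
	err = acquire_b();		/* here: try_module_get()       */
	if (err)
		goto err_b;
	return 0;

	err_b:
		release_a();		/* here: edac_put_sysfs_class() */
	err_a:
		return err;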
@@ -290,12 +294,11 @@ err_out:
  * edac_device_unregister_sysfs_main_kobj:
  *     the '..../edac/<name>' kobject
  */
-void edac_device_unregister_sysfs_main_kobj(
-                                       struct edac_device_ctl_info *edac_dev)
+void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
 {
        debugf0("%s()\n", __func__);
        debugf4("%s() name of kobject is: %s\n",
-               __func__, kobject_name(&edac_dev->kobj));
+               __func__, kobject_name(&dev->kobj));
 
        /*
         * Unregister the edac device's kobject and
@@ -304,7 +307,8 @@ void edac_device_unregister_sysfs_main_kobj(
         *   a) module_put() this module
         *   b) 'kfree' the memory
         */
-       kobject_put(&edac_dev->kobj);
+       kobject_put(&dev->kobj);
+       edac_put_sysfs_class();
 }
 
 /* edac_dev -> instance information */
index 3630308e7b811a66f398193eae71cc12f5d7383f..6b21e25f7a84cc99ad6ea710b788745992d1170f 100644 (file)
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
 {
        int status;
 
+       if (mci->op_state != OP_RUNNING_POLL)
+               return;
+
        status = cancel_delayed_work(&mci->work);
        if (status == 0) {
                debugf0("%s() not canceled, flush the queue\n",
index 8aad94d10c0cced51d10746f22b5086068c48d74..a4135860149b5592e6e73e8036796f63e6f045d4 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/ctype.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include <linux/bug.h>
 
 #include "edac_core.h"
@@ -1011,13 +1012,13 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
  */
 int edac_sysfs_setup_mc_kset(void)
 {
-       int err = 0;
+       int err = -EINVAL;
        struct sysdev_class *edac_class;
 
        debugf1("%s()\n", __func__);
 
        /* get the /sys/devices/system/edac class reference */
-       edac_class = edac_get_edac_class();
+       edac_class = edac_get_sysfs_class();
        if (edac_class == NULL) {
                debugf1("%s() no edac_class error=%d\n", __func__, err);
                goto fail_out;
@@ -1028,15 +1029,16 @@ int edac_sysfs_setup_mc_kset(void)
        if (!mc_kset) {
                err = -ENOMEM;
                debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
-               goto fail_out;
+               goto fail_kset;
        }
 
        debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
 
        return 0;
 
+fail_kset:
+       edac_put_sysfs_class();
 
-       /* error unwind stack */
 fail_out:
        return err;
 }
@@ -1049,5 +1051,6 @@ fail_out:
 void edac_sysfs_teardown_mc_kset(void)
 {
        kset_unregister(mc_kset);
+       edac_put_sysfs_class();
 }
 
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
deleted file mode 100644 (file)
index 9014df6..0000000
--- a/drivers/edac/edac_mce_amd.c
+++ /dev/null
@@ -1,452 +0,0 @@
-#include <linux/module.h>
-#include "edac_mce_amd.h"
-
-static bool report_gart_errors;
-static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
-
-void amd_report_gart_errors(bool v)
-{
-       report_gart_errors = v;
-}
-EXPORT_SYMBOL_GPL(amd_report_gart_errors);
-
-void amd_register_ecc_decoder(void (*f)(int, struct err_regs *))
-{
-       nb_bus_decoder = f;
-}
-EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
-
-void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *))
-{
-       if (nb_bus_decoder) {
-               WARN_ON(nb_bus_decoder != f);
-
-               nb_bus_decoder = NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
-
-/*
- * string representation for the different MCA reported error types, see F3x48
- * or MSR0000_0411.
- */
-const char *tt_msgs[] = {        /* transaction type */
-       "instruction",
-       "data",
-       "generic",
-       "reserved"
-};
-EXPORT_SYMBOL_GPL(tt_msgs);
-
-const char *ll_msgs[] = {      /* cache level */
-       "L0",
-       "L1",
-       "L2",
-       "L3/generic"
-};
-EXPORT_SYMBOL_GPL(ll_msgs);
-
-const char *rrrr_msgs[] = {
-       "generic",
-       "generic read",
-       "generic write",
-       "data read",
-       "data write",
-       "inst fetch",
-       "prefetch",
-       "evict",
-       "snoop",
-       "reserved RRRR= 9",
-       "reserved RRRR= 10",
-       "reserved RRRR= 11",
-       "reserved RRRR= 12",
-       "reserved RRRR= 13",
-       "reserved RRRR= 14",
-       "reserved RRRR= 15"
-};
-EXPORT_SYMBOL_GPL(rrrr_msgs);
-
-const char *pp_msgs[] = {      /* participating processor */
-       "local node originated (SRC)",
-       "local node responded to request (RES)",
-       "local node observed as 3rd party (OBS)",
-       "generic"
-};
-EXPORT_SYMBOL_GPL(pp_msgs);
-
-const char *to_msgs[] = {
-       "no timeout",
-       "timed out"
-};
-EXPORT_SYMBOL_GPL(to_msgs);
-
-const char *ii_msgs[] = {      /* memory or i/o */
-       "mem access",
-       "reserved",
-       "i/o access",
-       "generic"
-};
-EXPORT_SYMBOL_GPL(ii_msgs);
-
-/*
- * Map the 4 or 5 (family-specific) bits of Extended Error code to the
- * string table.
- */
-const char *ext_msgs[] = {
-       "K8 ECC error",                                 /* 0_0000b */
-       "CRC error on link",                            /* 0_0001b */
-       "Sync error packets on link",                   /* 0_0010b */
-       "Master Abort during link operation",           /* 0_0011b */
-       "Target Abort during link operation",           /* 0_0100b */
-       "Invalid GART PTE entry during table walk",     /* 0_0101b */
-       "Unsupported atomic RMW command received",      /* 0_0110b */
-       "WDT error: NB transaction timeout",            /* 0_0111b */
-       "ECC/ChipKill ECC error",                       /* 0_1000b */
-       "SVM DEV Error",                                /* 0_1001b */
-       "Link Data error",                              /* 0_1010b */
-       "Link/L3/Probe Filter Protocol error",          /* 0_1011b */
-       "NB Internal Arrays Parity error",              /* 0_1100b */
-       "DRAM Address/Control Parity error",            /* 0_1101b */
-       "Link Transmission error",                      /* 0_1110b */
-       "GART/DEV Table Walk Data error"                /* 0_1111b */
-       "Res 0x100 error",                              /* 1_0000b */
-       "Res 0x101 error",                              /* 1_0001b */
-       "Res 0x102 error",                              /* 1_0010b */
-       "Res 0x103 error",                              /* 1_0011b */
-       "Res 0x104 error",                              /* 1_0100b */
-       "Res 0x105 error",                              /* 1_0101b */
-       "Res 0x106 error",                              /* 1_0110b */
-       "Res 0x107 error",                              /* 1_0111b */
-       "Res 0x108 error",                              /* 1_1000b */
-       "Res 0x109 error",                              /* 1_1001b */
-       "Res 0x10A error",                              /* 1_1010b */
-       "Res 0x10B error",                              /* 1_1011b */
-       "ECC error in L3 Cache Data",                   /* 1_1100b */
-       "L3 Cache Tag error",                           /* 1_1101b */
-       "L3 Cache LRU Parity error",                    /* 1_1110b */
-       "Probe Filter error"                            /* 1_1111b */
-};
-EXPORT_SYMBOL_GPL(ext_msgs);
-
-static void amd_decode_dc_mce(u64 mc0_status)
-{
-       u32 ec  = mc0_status & 0xffff;
-       u32 xec = (mc0_status >> 16) & 0xf;
-
-       pr_emerg("Data Cache Error");
-
-       if (xec == 1 && TLB_ERROR(ec))
-               pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
-       else if (xec == 0) {
-               if (mc0_status & (1ULL << 40))
-                       pr_cont(" during Data Scrub.\n");
-               else if (TLB_ERROR(ec))
-                       pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
-               else if (MEM_ERROR(ec)) {
-                       u8 ll   = ec & 0x3;
-                       u8 tt   = (ec >> 2) & 0x3;
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       /* see F10h BKDG (31116), Table 92. */
-                       if (ll == 0x1) {
-                               if (tt != 0x1)
-                                       goto wrong_dc_mce;
-
-                               pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));
-
-                       } else if (ll == 0x2 && rrrr == 0x3)
-                               pr_cont(" during L1 linefill from L2.\n");
-                       else
-                               goto wrong_dc_mce;
-               } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
-                       pr_cont(" during system linefill.\n");
-               else
-                       goto wrong_dc_mce;
-       } else
-               goto wrong_dc_mce;
-
-       return;
-
-wrong_dc_mce:
-       pr_warning("Corrupted DC MCE info?\n");
-}
-
-static void amd_decode_ic_mce(u64 mc1_status)
-{
-       u32 ec  = mc1_status & 0xffff;
-       u32 xec = (mc1_status >> 16) & 0xf;
-
-       pr_emerg("Instruction Cache Error");
-
-       if (xec == 1 && TLB_ERROR(ec))
-               pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
-       else if (xec == 0) {
-               if (TLB_ERROR(ec))
-                       pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
-               else if (BUS_ERROR(ec)) {
-                       if (boot_cpu_data.x86 == 0xf &&
-                           (mc1_status & (1ULL << 58)))
-                               pr_cont(" during system linefill.\n");
-                       else
-                               pr_cont(" during attempted NB data read.\n");
-               } else if (MEM_ERROR(ec)) {
-                       u8 ll   = ec & 0x3;
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       if (ll == 0x2)
-                               pr_cont(" during a linefill from L2.\n");
-                       else if (ll == 0x1) {
-
-                               switch (rrrr) {
-                               case 0x5:
-                                       pr_cont(": Parity error during "
-                                              "data load.\n");
-                                       break;
-
-                               case 0x7:
-                                       pr_cont(": Copyback Parity/Victim"
-                                               " error.\n");
-                                       break;
-
-                               case 0x8:
-                                       pr_cont(": Tag Snoop error.\n");
-                                       break;
-
-                               default:
-                                       goto wrong_ic_mce;
-                                       break;
-                               }
-                       }
-               } else
-                       goto wrong_ic_mce;
-       } else
-               goto wrong_ic_mce;
-
-       return;
-
-wrong_ic_mce:
-       pr_warning("Corrupted IC MCE info?\n");
-}
-
-static void amd_decode_bu_mce(u64 mc2_status)
-{
-       u32 ec = mc2_status & 0xffff;
-       u32 xec = (mc2_status >> 16) & 0xf;
-
-       pr_emerg("Bus Unit Error");
-
-       if (xec == 0x1)
-               pr_cont(" in the write data buffers.\n");
-       else if (xec == 0x3)
-               pr_cont(" in the victim data buffers.\n");
-       else if (xec == 0x2 && MEM_ERROR(ec))
-               pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
-       else if (xec == 0x0) {
-               if (TLB_ERROR(ec))
-                       pr_cont(": %s error in a Page Descriptor Cache or "
-                               "Guest TLB.\n", TT_MSG(ec));
-               else if (BUS_ERROR(ec))
-                       pr_cont(": %s/ECC error in data read from NB: %s.\n",
-                               RRRR_MSG(ec), PP_MSG(ec));
-               else if (MEM_ERROR(ec)) {
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       if (rrrr >= 0x7)
-                               pr_cont(": %s error during data copyback.\n",
-                                       RRRR_MSG(ec));
-                       else if (rrrr <= 0x1)
-                               pr_cont(": %s parity/ECC error during data "
-                                       "access from L2.\n", RRRR_MSG(ec));
-                       else
-                               goto wrong_bu_mce;
-               } else
-                       goto wrong_bu_mce;
-       } else
-               goto wrong_bu_mce;
-
-       return;
-
-wrong_bu_mce:
-       pr_warning("Corrupted BU MCE info?\n");
-}
-
-static void amd_decode_ls_mce(u64 mc3_status)
-{
-       u32 ec  = mc3_status & 0xffff;
-       u32 xec = (mc3_status >> 16) & 0xf;
-
-       pr_emerg("Load Store Error");
-
-       if (xec == 0x0) {
-               u8 rrrr = (ec >> 4) & 0xf;
-
-               if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
-                       goto wrong_ls_mce;
-
-               pr_cont(" during %s.\n", RRRR_MSG(ec));
-       }
-       return;
-
-wrong_ls_mce:
-       pr_warning("Corrupted LS MCE info?\n");
-}
-
-void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
-{
-       u32 ec  = ERROR_CODE(regs->nbsl);
-
-       if (!handle_errors)
-               return;
-
-       /*
-        * GART TLB error reporting is disabled by default. Bail out early.
-        */
-       if (TLB_ERROR(ec) && !report_gart_errors)
-               return;
-
-       pr_emerg("Northbridge Error, node %d", node_id);
-
-       /*
-        * F10h, revD can disable ErrCpu[3:0] so check that first and also the
-        * value encoding has changed so interpret those differently
-        */
-       if ((boot_cpu_data.x86 == 0x10) &&
-           (boot_cpu_data.x86_model > 7)) {
-               if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
-                       pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
-       } else {
-               u8 assoc_cpus = regs->nbsh & 0xf;
-
-               if (assoc_cpus > 0)
-                       pr_cont(", core: %d", fls(assoc_cpus) - 1);
-
-               pr_cont("\n");
-       }
-
-       pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
-
-       if (BUS_ERROR(ec) && nb_bus_decoder)
-               nb_bus_decoder(node_id, regs);
-}
-EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
-
-static void amd_decode_fr_mce(u64 mc5_status)
-{
-       /* we have only one error signature so match all fields at once. */
-       if ((mc5_status & 0xffff) == 0x0f0f)
-               pr_emerg(" FR Error: CPU Watchdog timer expire.\n");
-       else
-               pr_warning("Corrupted FR MCE info?\n");
-}
-
-static inline void amd_decode_err_code(unsigned int ec)
-{
-       if (TLB_ERROR(ec)) {
-               pr_emerg("Transaction: %s, Cache Level %s\n",
-                        TT_MSG(ec), LL_MSG(ec));
-       } else if (MEM_ERROR(ec)) {
-               pr_emerg("Transaction: %s, Type: %s, Cache Level: %s",
-                        RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
-       } else if (BUS_ERROR(ec)) {
-               pr_emerg("Transaction type: %s(%s), %s, Cache Level: %s, "
-                        "Participating Processor: %s\n",
-                         RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
-                         PP_MSG(ec));
-       } else
-               pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
-}
-
-static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
-                          void *data)
-{
-       struct mce *m = (struct mce *)data;
-       struct err_regs regs;
-       int node, ecc;
-
-       pr_emerg("MC%d_STATUS: ", m->bank);
-
-       pr_cont("%sorrected error, other errors lost: %s, "
-                "CPU context corrupt: %s",
-                ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
-                ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
-                ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
-
-       /* do the two bits[14:13] together */
-       ecc = (m->status >> 45) & 0x3;
-       if (ecc)
-               pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
-
-       pr_cont("\n");
-
-       switch (m->bank) {
-       case 0:
-               amd_decode_dc_mce(m->status);
-               break;
-
-       case 1:
-               amd_decode_ic_mce(m->status);
-               break;
-
-       case 2:
-               amd_decode_bu_mce(m->status);
-               break;
-
-       case 3:
-               amd_decode_ls_mce(m->status);
-               break;
-
-       case 4:
-               regs.nbsl  = (u32) m->status;
-               regs.nbsh  = (u32)(m->status >> 32);
-               regs.nbeal = (u32) m->addr;
-               regs.nbeah = (u32)(m->addr >> 32);
-               node       = amd_get_nb_id(m->extcpu);
-
-               amd_decode_nb_mce(node, &regs, 1);
-               break;
-
-       case 5:
-               amd_decode_fr_mce(m->status);
-               break;
-
-       default:
-               break;
-       }
-
-       amd_decode_err_code(m->status & 0xffff);
-
-       return NOTIFY_STOP;
-}
-
-static struct notifier_block amd_mce_dec_nb = {
-       .notifier_call  = amd_decode_mce,
-};
-
-static int __init mce_amd_init(void)
-{
-       /*
-        * We can decode MCEs for K8, F10h and F11h CPUs:
-        */
-       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-               return 0;
-
-       if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
-               return 0;
-
-       atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
-
-       return 0;
-}
-early_initcall(mce_amd_init);
-
-#ifdef MODULE
-static void __exit mce_amd_exit(void)
-{
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
-}
-
-MODULE_DESCRIPTION("AMD MCE decoder");
-MODULE_ALIAS("edac-mce-amd");
-MODULE_LICENSE("GPL");
-module_exit(mce_amd_exit);
-#endif
index 7e1374afd967256054be17b687954151576d60ae..be4b075c30984c1b408246a306664c0a6bb626f0 100644 (file)
@@ -26,15 +26,6 @@ EXPORT_SYMBOL_GPL(edac_debug_level);
 /* scope is to module level only */
 struct workqueue_struct *edac_workqueue;
 
-/*
- * sysfs object: /sys/devices/system/edac
- *     need to export to other files in this modules
- */
-static struct sysdev_class edac_class = {
-       .name = "edac",
-};
-static int edac_class_valid;
-
 /*
  * edac_op_state_to_string()
  */
@@ -54,60 +45,6 @@ char *edac_op_state_to_string(int opstate)
        return "UNKNOWN";
 }
 
-/*
- * edac_get_edac_class()
- *
- *     return pointer to the edac class of 'edac'
- */
-struct sysdev_class *edac_get_edac_class(void)
-{
-       struct sysdev_class *classptr = NULL;
-
-       if (edac_class_valid)
-               classptr = &edac_class;
-
-       return classptr;
-}
-
-/*
- * edac_register_sysfs_edac_name()
- *
- *     register the 'edac' into /sys/devices/system
- *
- * return:
- *     0  success
- *     !0 error
- */
-static int edac_register_sysfs_edac_name(void)
-{
-       int err;
-
-       /* create the /sys/devices/system/edac directory */
-       err = sysdev_class_register(&edac_class);
-
-       if (err) {
-               debugf1("%s() error=%d\n", __func__, err);
-               return err;
-       }
-
-       edac_class_valid = 1;
-       return 0;
-}
-
-/*
- * sysdev_class_unregister()
- *
- *     unregister the 'edac' from /sys/devices/system
- */
-static void edac_unregister_sysfs_edac_name(void)
-{
-       /* only if currently registered, then unregister it */
-       if (edac_class_valid)
-               sysdev_class_unregister(&edac_class);
-
-       edac_class_valid = 0;
-}
-
 /*
  * edac_workqueue_setup
  *     initialize the edac work queue for polling operations
@@ -153,22 +90,12 @@ static int __init edac_init(void)
         */
        edac_pci_clear_parity_errors();
 
-       /*
-        * perform the registration of the /sys/devices/system/edac class object
-        */
-       if (edac_register_sysfs_edac_name()) {
-               edac_printk(KERN_ERR, EDAC_MC,
-                       "Error initializing 'edac' kobject\n");
-               err = -ENODEV;
-               goto error;
-       }
-
        /*
         * now set up the mc_kset under the edac class object
         */
        err = edac_sysfs_setup_mc_kset();
        if (err)
-               goto sysfs_setup_fail;
+               goto error;
 
        /* Setup/Initialize the workq for this core */
        err = edac_workqueue_setup();
@@ -183,9 +110,6 @@ static int __init edac_init(void)
 workq_fail:
        edac_sysfs_teardown_mc_kset();
 
-sysfs_setup_fail:
-       edac_unregister_sysfs_edac_name();
-
 error:
        return err;
 }
@@ -201,7 +125,6 @@ static void __exit edac_exit(void)
        /* tear down the various subsystems */
        edac_workqueue_teardown();
        edac_sysfs_teardown_mc_kset();
-       edac_unregister_sysfs_edac_name();
 }
 
 /*
index 233d4798c3aa2ecf18f3875d45a138b8bc2f2fe8..17aabb7b90ecff1ac09a7344ea9fe17a86fb9c8a 100644 (file)
@@ -42,7 +42,6 @@ extern void edac_device_unregister_sysfs_main_kobj(
                                struct edac_device_ctl_info *edac_dev);
 extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
 extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
-extern struct sysdev_class *edac_get_edac_class(void);
 
 /* edac core workqueue: single CPU mode */
 extern struct workqueue_struct *edac_workqueue;
index c39697df9cb41e87c8177c76bab788b85567003e..023b01cb5175c70a15804faba0c356ecacfb1809 100644 (file)
@@ -7,7 +7,7 @@
  *
  */
 #include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/edac.h>
 #include <linux/slab.h>
 #include <linux/ctype.h>
 
@@ -354,7 +354,7 @@ static int edac_pci_main_kobj_setup(void)
        /* First time, so create the main kobject and its
         * controls and atributes
         */
-       edac_class = edac_get_edac_class();
+       edac_class = edac_get_sysfs_class();
        if (edac_class == NULL) {
                debugf1("%s() no edac_class\n", __func__);
                err = -ENODEV;
@@ -368,7 +368,7 @@ static int edac_pci_main_kobj_setup(void)
        if (!try_module_get(THIS_MODULE)) {
                debugf1("%s() try_module_get() failed\n", __func__);
                err = -ENODEV;
-               goto decrement_count_fail;
+               goto mod_get_fail;
        }
 
        edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -403,6 +403,9 @@ kobject_init_and_add_fail:
 kzalloc_fail:
        module_put(THIS_MODULE);
 
+mod_get_fail:
+       edac_put_sysfs_class();
+
 decrement_count_fail:
        /* if are on this error exit, nothing to tear down */
        atomic_dec(&edac_pci_sysfs_refcount);
@@ -429,6 +432,7 @@ static void edac_pci_main_kobj_teardown(void)
                        __func__);
                kobject_put(edac_pci_top_main_kobj);
        }
+       edac_put_sysfs_class();
 }
 
 /*
index 20b428aa155e144413aeaeed2928b059ebd659d4..aab970760b755b4a5e3f848829bcbd74b3122a37 100644 (file)
@@ -3,10 +3,13 @@
  *
  * Author: Dave Jiang <djiang@mvista.com>
  *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2007 (c) MontaVista Software, Inc.
+ * 2010 (c) Advanced Micro Devices Inc.
+ *         Borislav Petkov <borislav.petkov@amd.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
  *
  */
 #include <linux/module.h>
@@ -23,6 +26,8 @@ EXPORT_SYMBOL_GPL(edac_handlers);
 int edac_err_assert = 0;
 EXPORT_SYMBOL_GPL(edac_err_assert);
 
+static atomic_t edac_class_valid = ATOMIC_INIT(0);
+
 /*
  * called to determine if there is an EDAC driver interested in
  * knowing an event (such as NMI) occurred
@@ -44,3 +49,41 @@ void edac_atomic_assert_error(void)
        edac_err_assert++;
 }
 EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
+
+/*
+ * sysfs object: /sys/devices/system/edac
+ *     need to export to other files
+ */
+struct sysdev_class edac_class = {
+       .name = "edac",
+};
+EXPORT_SYMBOL_GPL(edac_class);
+
+/* return pointer to the 'edac' node in sysfs */
+struct sysdev_class *edac_get_sysfs_class(void)
+{
+       int err = 0;
+
+       if (atomic_read(&edac_class_valid))
+               goto out;
+
+       /* create the /sys/devices/system/edac directory */
+       err = sysdev_class_register(&edac_class);
+       if (err) {
+               printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
+               return NULL;
+       }
+
+out:
+       atomic_inc(&edac_class_valid);
+       return &edac_class;
+}
+EXPORT_SYMBOL_GPL(edac_get_sysfs_class);
+
+void edac_put_sysfs_class(void)
+{
+       /* last user unregisters it */
+       if (atomic_dec_and_test(&edac_class_valid))
+               sysdev_class_unregister(&edac_class);
+}
+EXPORT_SYMBOL_GPL(edac_put_sysfs_class);
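The atomic counter turns the sysfs class into a shared, reference-counted resource: the first edac_get_sysfs_class() call registers /sys/devices/system/edac, later callers only bump the count, and the last edac_put_sysfs_class() unregisters it. The expected pairing in a consumer, sketched:

	static struct sysdev_class *edac_class;

	static int consumer_setup(void)
	{
		edac_class = edac_get_sysfs_class();	/* count++, register on 0->1 */
		if (!edac_class)
			return -ENODEV;
		return 0;
	}

	static void consumer_teardown(void)
	{
		edac_put_sysfs_class();			/* count--, unregister on ->0 */
	}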
index e0187d16dd7c53fd240b58d62005e4c17df14bc4..0fd5b85a0f756745bd1074ae673e89d6c81a237a 100644 (file)
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
        ATTR_COUNTER(0),
        ATTR_COUNTER(1),
        ATTR_COUNTER(2),
+       { .attr = { .name = NULL } }
 };
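The added entry is the NULL-name sentinel that the attribute-walking code needs in order to stop; without it, iteration runs past the end of the array. The consumer loop, in its usual form (illustrative, with a hypothetical helper):

	for (i = 0; attrs[i].attr.name; i++)
		err = create_attr_file(kobj, &attrs[i]);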
 
 static struct mcidev_sysfs_group i7core_udimm_counters = {
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
new file mode 100644 (file)
index 0000000..c018109
--- /dev/null
+++ b/drivers/edac/mce_amd.c
@@ -0,0 +1,680 @@
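The new file routes cache-bank decoding through a per-family ops table (fam_ops below), so K8, F10h, F12h and F14h quirks live in separate small predicates. A sketch of the table's likely shape -- the struct itself is declared in mce_amd.h, which is not part of this excerpt, so the field list is an assumption:

	struct amd_decoder_ops {
		bool (*dc_mce)(u16 ec);	/* data-cache MCE signatures */
		bool (*ic_mce)(u16 ec);	/* insn-cache MCE signatures */
	};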
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "mce_amd.h"
+
+static struct amd_decoder_ops *fam_ops;
+
+static u8 nb_err_cpumask = 0xf;
+
+static bool report_gart_errors;
+static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);
+
+void amd_report_gart_errors(bool v)
+{
+       report_gart_errors = v;
+}
+EXPORT_SYMBOL_GPL(amd_report_gart_errors);
+
+void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
+{
+       nb_bus_decoder = f;
+}
+EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
+
+void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
+{
+       if (nb_bus_decoder) {
+               WARN_ON(nb_bus_decoder != f);
+
+               nb_bus_decoder = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
+
+/*
+ * string representation for the different MCA reported error types, see F3x48
+ * or MSR0000_0411.
+ */
+
+/* transaction type */
+const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
+EXPORT_SYMBOL_GPL(tt_msgs);
+
+/* cache level */
+const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
+EXPORT_SYMBOL_GPL(ll_msgs);
+
+/* memory transaction type */
+const char *rrrr_msgs[] = {
+       "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
+};
+EXPORT_SYMBOL_GPL(rrrr_msgs);
+
+/* participating processor */
+const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
+EXPORT_SYMBOL_GPL(pp_msgs);
+
+/* request timeout */
+const char *to_msgs[] = { "no timeout", "timed out" };
+EXPORT_SYMBOL_GPL(to_msgs);
+
+/* memory or i/o */
+const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
+EXPORT_SYMBOL_GPL(ii_msgs);
+
+static const char *f10h_nb_mce_desc[] = {
+       "HT link data error",
+       "Protocol error (link, L3, probe filter, etc.)",
+       "Parity error in NB-internal arrays",
+       "Link Retry due to IO link transmission error",
+       "L3 ECC data cache error",
+       "ECC error in L3 cache tag",
+       "L3 LRU parity bits error",
+       "ECC Error in the Probe Filter directory"
+};
+
+static bool f12h_dc_mce(u16 ec)
+{
+       bool ret = false;
+
+       if (MEM_ERROR(ec)) {
+               u8 ll = ec & 0x3;
+               ret = true;
+
+               if (ll == LL_L2)
+                       pr_cont("during L1 linefill from L2.\n");
+               else if (ll == LL_L1)
+                       pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
+               else
+                       ret = false;
+       }
+       return ret;
+}
+
+static bool f10h_dc_mce(u16 ec)
+{
+       u8 r4  = (ec >> 4) & 0xf;
+       u8 ll  = ec & 0x3;
+
+       if (r4 == R4_GEN && ll == LL_L1) {
+               pr_cont("during data scrub.\n");
+               return true;
+       }
+       return f12h_dc_mce(ec);
+}
+
+static bool k8_dc_mce(u16 ec)
+{
+       if (BUS_ERROR(ec)) {
+               pr_cont("during system linefill.\n");
+               return true;
+       }
+
+       return f10h_dc_mce(ec);
+}
+
+static bool f14h_dc_mce(u16 ec)
+{
+       u8 r4    = (ec >> 4) & 0xf;
+       u8 ll    = ec & 0x3;
+       u8 tt    = (ec >> 2) & 0x3;
+       u8 ii    = tt;
+       bool ret = true;
+
+       if (MEM_ERROR(ec)) {
+
+               if (tt != TT_DATA || ll != LL_L1)
+                       return false;
+
+               switch (r4) {
+               case R4_DRD:
+               case R4_DWR:
+                       pr_cont("Data/Tag parity error due to %s.\n",
+                               (r4 == R4_DRD ? "load/hw prf" : "store"));
+                       break;
+               case R4_EVICT:
+                       pr_cont("Copyback parity error on a tag miss.\n");
+                       break;
+               case R4_SNOOP:
+                       pr_cont("Tag parity error during snoop.\n");
+                       break;
+               default:
+                       ret = false;
+               }
+       } else if (BUS_ERROR(ec)) {
+
+               if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
+                       return false;
+
+               pr_cont("System read data error on a ");
+
+               switch (r4) {
+               case R4_RD:
+                       pr_cont("TLB reload.\n");
+                       break;
+               case R4_DWR:
+                       pr_cont("store.\n");
+                       break;
+               case R4_DRD:
+                       pr_cont("load.\n");
+                       break;
+               default:
+                       ret = false;
+               }
+       } else {
+               ret = false;
+       }
+
+       return ret;
+}
+
+static void amd_decode_dc_mce(struct mce *m)
+{
+       u16 ec = m->status & 0xffff;
+       u8 xec = (m->status >> 16) & 0xf;
+
+       pr_emerg(HW_ERR "Data Cache Error: ");
+
+       /* TLB error signatures are the same across families */
+       if (TLB_ERROR(ec)) {
+               u8 tt = (ec >> 2) & 0x3;
+
+               if (tt == TT_DATA) {
+                       pr_cont("%s TLB %s.\n", LL_MSG(ec),
+                               (xec ? "multimatch" : "parity error"));
+                       return;
+               } else
+                       goto wrong_dc_mce;
+       }
+
+       if (!fam_ops->dc_mce(ec))
+               goto wrong_dc_mce;
+
+       return;
+
+wrong_dc_mce:
+       pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
+}
+
+static bool k8_ic_mce(u16 ec)
+{
+       u8 ll    = ec & 0x3;
+       u8 r4    = (ec >> 4) & 0xf;
+       bool ret = true;
+
+       if (!MEM_ERROR(ec))
+               return false;
+
+       if (ll == 0x2)
+               pr_cont("during a linefill from L2.\n");
+       else if (ll == 0x1) {
+               switch (r4) {
+               case R4_IRD:
+                       pr_cont("Parity error during data load.\n");
+                       break;
+
+               case R4_EVICT:
+                       pr_cont("Copyback Parity/Victim error.\n");
+                       break;
+
+               case R4_SNOOP:
+                       pr_cont("Tag Snoop error.\n");
+                       break;
+
+               default:
+                       ret = false;
+                       break;
+               }
+       } else
+               ret = false;
+
+       return ret;
+}
+
+static bool f14h_ic_mce(u16 ec)
+{
+       u8 ll    = ec & 0x3;
+       u8 tt    = (ec >> 2) & 0x3;
+       u8 r4    = (ec >> 4) & 0xf;
+       bool ret = true;
+
+       if (!MEM_ERROR(ec))
+               return false;
+
+       if (tt != TT_INSTR || ll != LL_L1)
+               return false;
+
+       if (r4 == R4_IRD)
+               pr_cont("Data/tag array parity error for a tag hit.\n");
+       else if (r4 == R4_SNOOP)
+               pr_cont("Tag error during snoop/victimization.\n");
+       else
+               ret = false;
+
+       return ret;
+}
+
+static void amd_decode_ic_mce(struct mce *m)
+{
+       u16 ec = m->status & 0xffff;
+       u8 xec = (m->status >> 16) & 0xf;
+
+       pr_emerg(HW_ERR "Instruction Cache Error: ");
+
+       if (TLB_ERROR(ec))
+               pr_cont("%s TLB %s.\n", LL_MSG(ec),
+                       (xec ? "multimatch" : "parity error"));
+       else if (BUS_ERROR(ec)) {
+               bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
+
+               pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
+       } else if (!fam_ops->ic_mce(ec))
+               pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
+}
+
+static void amd_decode_bu_mce(struct mce *m)
+{
+       u16 ec = m->status & 0xffff;
+       u8 xec = (m->status >> 16) & 0xf;
+
+       pr_emerg(HW_ERR "Bus Unit Error");
+
+       if (xec == 0x1)
+               pr_cont(" in the write data buffers.\n");
+       else if (xec == 0x3)
+               pr_cont(" in the victim data buffers.\n");
+       else if (xec == 0x2 && MEM_ERROR(ec))
+               pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
+       else if (xec == 0x0) {
+               if (TLB_ERROR(ec))
+                       pr_cont(": %s error in a Page Descriptor Cache or "
+                               "Guest TLB.\n", TT_MSG(ec));
+               else if (BUS_ERROR(ec))
+                       pr_cont(": %s/ECC error in data read from NB: %s.\n",
+                               RRRR_MSG(ec), PP_MSG(ec));
+               else if (MEM_ERROR(ec)) {
+                       u8 r4 = (ec >> 4) & 0xf;
+
+                       if (r4 >= R4_EVICT)
+                               pr_cont(": %s error during data copyback.\n",
+                                       RRRR_MSG(ec));
+                       else if (r4 <= R4_RD)
+                               pr_cont(": %s parity/ECC error during data "
+                                       "access from L2.\n", RRRR_MSG(ec));
+                       else
+                               goto wrong_bu_mce;
+               } else
+                       goto wrong_bu_mce;
+       } else
+               goto wrong_bu_mce;
+
+       return;
+
+wrong_bu_mce:
+       pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
+}
+
+static void amd_decode_ls_mce(struct mce *m)
+{
+       u16 ec = m->status & 0xffff;
+       u8 xec = (m->status >> 16) & 0xf;
+
+       if (boot_cpu_data.x86 == 0x14) {
+               pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
+                        " please report on LKML.\n");
+               return;
+       }
+
+       pr_emerg(HW_ERR "Load Store Error");
+
+       if (xec == 0x0) {
+               u8 r4 = (ec >> 4) & 0xf;
+
+               if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
+                       goto wrong_ls_mce;
+
+               pr_cont(" during %s.\n", RRRR_MSG(ec));
+       } else
+               goto wrong_ls_mce;
+
+       return;
+
+wrong_ls_mce:
+       pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
+}
+
+static bool k8_nb_mce(u16 ec, u8 xec)
+{
+       bool ret = true;
+
+       switch (xec) {
+       case 0x1:
+               pr_cont("CRC error detected on HT link.\n");
+               break;
+
+       case 0x5:
+               pr_cont("Invalid GART PTE entry during GART table walk.\n");
+               break;
+
+       case 0x6:
+               pr_cont("Unsupported atomic RMW received from an IO link.\n");
+               break;
+
+       case 0x0:
+       case 0x8:
+               if (boot_cpu_data.x86 == 0x11)
+                       return false;
+
+               pr_cont("DRAM ECC error detected on the NB.\n");
+               break;
+
+       case 0xd:
+               pr_cont("Parity error on the DRAM addr/ctl signals.\n");
+               break;
+
+       default:
+               ret = false;
+               break;
+       }
+
+       return ret;
+}
+
+static bool f10h_nb_mce(u16 ec, u8 xec)
+{
+       bool ret = true;
+       u8 offset = 0;
+
+       if (k8_nb_mce(ec, xec))
+               return true;
+
+       switch (xec) {
+       case 0xa ... 0xc:
+               offset = 10;
+               break;
+
+       case 0xe:
+               offset = 11;
+               break;
+
+       case 0xf:
+               if (TLB_ERROR(ec))
+                       pr_cont("GART Table Walk data error.\n");
+               else if (BUS_ERROR(ec))
+                       pr_cont("DMA Exclusion Vector Table Walk error.\n");
+               else
+                       ret = false;
+
+               goto out;
+
+       case 0x1c ... 0x1f:
+               offset = 24;
+               break;
+
+       default:
+               ret = false;
+
+               goto out;
+       }
+
+       pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);
+
+out:
+       return ret;
+}
+
+static bool nb_noop_mce(u16 ec, u8 xec)
+{
+       return false;
+}
+
+void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
+{
+       u8 xec   = (m->status >> 16) & 0x1f;
+       u16 ec   = m->status & 0xffff;
+       u32 nbsh = (u32)(m->status >> 32);
+
+       pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);
+
+       /*
+        * On F10h revD and later, ErrCpu[3:0] reporting can be disabled, so
+        * check the valid bit first. The value encoding also changed (a core
+        * number rather than a core bitmask), so interpret it differently.
+        */
+       if ((boot_cpu_data.x86 == 0x10) &&
+           (boot_cpu_data.x86_model > 7)) {
+               if (nbsh & K8_NBSH_ERR_CPU_VAL)
+                       pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask));
+       } else {
+               u8 assoc_cpus = nbsh & nb_err_cpumask;
+
+               if (assoc_cpus > 0)
+                       pr_cont(", core: %d", fls(assoc_cpus) - 1);
+       }
+
+       switch (xec) {
+       case 0x2:
+               pr_cont("Sync error (sync packets on HT link detected).\n");
+               return;
+
+       case 0x3:
+               pr_cont("HT Master abort.\n");
+               return;
+
+       case 0x4:
+               pr_cont("HT Target abort.\n");
+               return;
+
+       case 0x7:
+               pr_cont("NB Watchdog timeout.\n");
+               return;
+
+       case 0x9:
+               pr_cont("SVM DMA Exclusion Vector error.\n");
+               return;
+
+       default:
+               break;
+       }
+
+       if (!fam_ops->nb_mce(ec, xec))
+               goto wrong_nb_mce;
+
+       if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+               if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
+                       nb_bus_decoder(node_id, m, nbcfg);
+
+       return;
+
+wrong_nb_mce:
+       pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
+}
+EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
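+
+/*
+ * Illustration (editorial, not from the patch): on the pre-revD parts
+ * handled above, the low NBSH bits are a bitmask of the cores that saw
+ * the error, so assoc_cpus == 0x4 (bit 2 set) gives fls(0x4) - 1 == 2,
+ * printed as ", core: 2".
+ */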
+
+static void amd_decode_fr_mce(struct mce *m)
+{
+       if (boot_cpu_data.x86 == 0xf ||
+           boot_cpu_data.x86 == 0x11)
+               goto wrong_fr_mce;
+
+       /* We have only one error signature, so match all fields at once. */
+       if ((m->status & 0xffff) == 0x0f0f) {
+               pr_emerg(HW_ERR "FR Error: CPU Watchdog timer expired.\n");
+               return;
+       }
+
+wrong_fr_mce:
+       pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
+}
+
+static inline void amd_decode_err_code(u16 ec)
+{
+       if (TLB_ERROR(ec)) {
+               pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
+                        TT_MSG(ec), LL_MSG(ec));
+       } else if (MEM_ERROR(ec)) {
+               pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
+                        RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
+       } else if (BUS_ERROR(ec)) {
+               pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
+                        "Participating Processor: %s\n",
+                         RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
+                         PP_MSG(ec));
+       } else
+               pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
+}
+
+/*
+ * Filter out unwanted MCE signatures here.
+ */
+static bool amd_filter_mce(struct mce *m)
+{
+       u8 xec = (m->status >> 16) & 0x1f;
+
+       /*
+        * NB GART TLB error reporting is disabled by default.
+        */
+       if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
+               return true;
+
+       return false;
+}
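+
+/*
+ * Example (editorial): a GART table-walk error is reported as bank 4
+ * (the NB bank, see amd_decode_mce() below) with extended error code
+ * 0x5, "Invalid GART PTE" in k8_nb_mce(), and is dropped here unless
+ * report_gart_errors is set.
+ */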
+
+int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct mce *m = (struct mce *)data;
+       int node, ecc;
+
+       if (amd_filter_mce(m))
+               return NOTIFY_STOP;
+
+       pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);
+
+       pr_cont("%sorrected error, other errors lost: %s, "
+                "CPU context corrupt: %s",
+                ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
+                ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
+                ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
+
+       /* do the two ECC bits (status[46:45], i.e. NBSH[14:13]) together */
+       ecc = (m->status >> 45) & 0x3;
+       if (ecc)
+               pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
+
+       pr_cont("\n");
+
+       switch (m->bank) {
+       case 0:
+               amd_decode_dc_mce(m);
+               break;
+
+       case 1:
+               amd_decode_ic_mce(m);
+               break;
+
+       case 2:
+               amd_decode_bu_mce(m);
+               break;
+
+       case 3:
+               amd_decode_ls_mce(m);
+               break;
+
+       case 4:
+               node = amd_get_nb_id(m->extcpu);
+               amd_decode_nb_mce(node, m, 0);
+               break;
+
+       case 5:
+               amd_decode_fr_mce(m);
+               break;
+
+       default:
+               break;
+       }
+
+       amd_decode_err_code(m->status & 0xffff);
+
+       return NOTIFY_STOP;
+}
+EXPORT_SYMBOL_GPL(amd_decode_mce);
+
+static struct notifier_block amd_mce_dec_nb = {
+       .notifier_call  = amd_decode_mce,
+};
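+
+/*
+ * Sketch (editorial, not part of the patch): any other module can hook
+ * the same decode chain in the same way, e.g.
+ *
+ *     static struct notifier_block my_nb = { .notifier_call = my_decoder };
+ *     atomic_notifier_chain_register(&x86_mce_decoder_chain, &my_nb);
+ */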
+
+static int __init mce_amd_init(void)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return 0;
+
+       if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) &&
+           (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf))
+               return 0;
+
+       fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
+       if (!fam_ops)
+               return -ENOMEM;
+
+       switch (boot_cpu_data.x86) {
+       case 0xf:
+               fam_ops->dc_mce = k8_dc_mce;
+               fam_ops->ic_mce = k8_ic_mce;
+               fam_ops->nb_mce = k8_nb_mce;
+               break;
+
+       case 0x10:
+               fam_ops->dc_mce = f10h_dc_mce;
+               fam_ops->ic_mce = k8_ic_mce;
+               fam_ops->nb_mce = f10h_nb_mce;
+               break;
+
+       case 0x11:
+               fam_ops->dc_mce = k8_dc_mce;
+               fam_ops->ic_mce = k8_ic_mce;
+               fam_ops->nb_mce = f10h_nb_mce;
+               break;
+
+       case 0x12:
+               fam_ops->dc_mce = f12h_dc_mce;
+               fam_ops->ic_mce = k8_ic_mce;
+               fam_ops->nb_mce = nb_noop_mce;
+               break;
+
+       case 0x14:
+               nb_err_cpumask  = 0x3;
+               fam_ops->dc_mce = f14h_dc_mce;
+               fam_ops->ic_mce = f14h_ic_mce;
+               fam_ops->nb_mce = nb_noop_mce;
+               break;
+
+       default:
+               printk(KERN_WARNING "Huh? What family is that: %d?!\n",
+                                   boot_cpu_data.x86);
+               kfree(fam_ops);
+               return -EINVAL;
+       }
+
+       pr_info("MCE: In-kernel MCE decoding enabled.\n");
+
+       atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+
+       return 0;
+}
+early_initcall(mce_amd_init);
+
+#ifdef MODULE
+static void __exit mce_amd_exit(void)
+{
+       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+       kfree(fam_ops);
+}
+
+MODULE_DESCRIPTION("AMD MCE decoder");
+MODULE_ALIAS("edac-mce-amd");
+MODULE_LICENSE("GPL");
+module_exit(mce_amd_exit);
+#endif
similarity index 65%
rename from drivers/edac/edac_mce_amd.h
rename to drivers/edac/mce_amd.h
index df23ee065f7981fe9f4084b461efa1de0f5d6836..35f6e0e3b297de16ad5e7c0ccac18dae0c388590 100644 (file)
@@ -1,11 +1,14 @@
 #ifndef _EDAC_MCE_AMD_H
 #define _EDAC_MCE_AMD_H
 
+#include <linux/notifier.h>
+
 #include <asm/mce.h>
 
+#define BIT_64(n)                      (U64_C(1) << (n))
+
 #define ERROR_CODE(x)                  ((x) & 0xffff)
 #define EXT_ERROR_CODE(x)              (((x) >> 16) & 0x1f)
-#define EXT_ERR_MSG(x)                 ext_msgs[EXT_ERROR_CODE(x)]
 
 #define LOW_SYNDROME(x)                        (((x) >> 15) & 0xff)
 #define HIGH_SYNDROME(x)               (((x) >> 24) & 0xff)
 #define II_MSG(x)                      ii_msgs[II(x)]
 #define LL(x)                          (((x) >> 0) & 0x3)
 #define LL_MSG(x)                      ll_msgs[LL(x)]
-#define RRRR(x)                                (((x) >> 4) & 0xf)
-#define RRRR_MSG(x)                    rrrr_msgs[RRRR(x)]
 #define TO(x)                          (((x) >> 8) & 0x1)
 #define TO_MSG(x)                      to_msgs[TO(x)]
 #define PP(x)                          (((x) >> 9) & 0x3)
 #define PP_MSG(x)                      pp_msgs[PP(x)]
 
+#define RRRR(x)                                (((x) >> 4) & 0xf)
+#define RRRR_MSG(x)                    ((RRRR(x) < 9) ?  rrrr_msgs[RRRR(x)] : "Wrong R4!")
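+/* rrrr_msgs[] has only 9 entries (R4_GEN..R4_SNOOP below), so the guard
+ * above keeps reserved R4 values from indexing past the table's end. */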
+
 #define K8_NBSH                                0x4C
 
 #define K8_NBSH_VALID_BIT              BIT(31)
 #define K8_NBSH_UECC                   BIT(13)
 #define K8_NBSH_ERR_SCRUBER            BIT(8)
 
+enum tt_ids {
+       TT_INSTR = 0,
+       TT_DATA,
+       TT_GEN,
+       TT_RESV,
+};
+
+enum ll_ids {
+       LL_RESV = 0,
+       LL_L1,
+       LL_L2,
+       LL_LG,
+};
+
+enum ii_ids {
+       II_MEM = 0,
+       II_RESV,
+       II_IO,
+       II_GEN,
+};
+
+enum rrrr_ids {
+       R4_GEN  = 0,
+       R4_RD,
+       R4_WR,
+       R4_DRD,
+       R4_DWR,
+       R4_IRD,
+       R4_PREF,
+       R4_EVICT,
+       R4_SNOOP,
+};
+
 extern const char *tt_msgs[];
 extern const char *ll_msgs[];
 extern const char *rrrr_msgs[];
 extern const char *pp_msgs[];
 extern const char *to_msgs[];
 extern const char *ii_msgs[];
-extern const char *ext_msgs[];
 
 /*
  * relevant NB regs
@@ -60,10 +96,19 @@ struct err_regs {
        u32 nbeal;
 };
 
+/*
+ * per-family decoder ops
+ */
+struct amd_decoder_ops {
+       bool (*dc_mce)(u16);
+       bool (*ic_mce)(u16);
+       bool (*nb_mce)(u16, u8);
+};
 
 void amd_report_gart_errors(bool);
-void amd_register_ecc_decoder(void (*f)(int, struct err_regs *));
-void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *));
-void amd_decode_nb_mce(int, struct err_regs *, int);
+void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
+void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32));
+void amd_decode_nb_mce(int, struct mce *, u32);
+int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data);
 
 #endif /* _EDAC_MCE_AMD_H */
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
new file mode 100644 (file)
index 0000000..8d0688f
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * A simple MCE injection facility for testing the MCE decoding code. This
+ * driver should be built as a module so that it can be loaded on production
+ * kernels for testing purposes.
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2010:  Borislav Petkov <borislav.petkov@amd.com>
+ *                     Advanced Micro Devices Inc.
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysdev.h>
+#include <linux/edac.h>
+#include <asm/mce.h>
+
+#include "mce_amd.h"
+
+struct edac_mce_attr {
+       struct attribute attr;
+       ssize_t (*show) (struct kobject *kobj, struct edac_mce_attr *attr, char *buf);
+       ssize_t (*store)(struct kobject *kobj, struct edac_mce_attr *attr,
+                        const char *buf, size_t count);
+};
+
+#define EDAC_MCE_ATTR(_name, _mode, _show, _store)                     \
+static struct edac_mce_attr mce_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+static struct kobject *mce_kobj;
+
+/*
+ * Collect all the MCi_XXX settings
+ */
+static struct mce i_mce;
+
+#define MCE_INJECT_STORE(reg)                                          \
+static ssize_t edac_inject_##reg##_store(struct kobject *kobj,         \
+                                        struct edac_mce_attr *attr,    \
+                                        const char *data, size_t count)\
+{                                                                      \
+       int ret = 0;                                                    \
+       unsigned long value;                                            \
+                                                                       \
+       ret = strict_strtoul(data, 16, &value);                         \
+       if (ret < 0) {                                                  \
+               printk(KERN_ERR "Error writing MCE " #reg " field.\n"); \
+               return ret;                                             \
+       }                                                               \
+                                                                       \
+       i_mce.reg = value;                                              \
+                                                                       \
+       return count;                                                   \
+}
+
+MCE_INJECT_STORE(status);
+MCE_INJECT_STORE(misc);
+MCE_INJECT_STORE(addr);
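+
+/*
+ * For reference (editorial): MCE_INJECT_STORE(status) expands to the
+ * sysfs store handler
+ *
+ *     static ssize_t edac_inject_status_store(struct kobject *kobj,
+ *                                             struct edac_mce_attr *attr,
+ *                                             const char *data,
+ *                                             size_t count)
+ *
+ * which parses the hex string in data and stores it in i_mce.status.
+ */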
+
+#define MCE_INJECT_SHOW(reg)                                           \
+static ssize_t edac_inject_##reg##_show(struct kobject *kobj,          \
+                                       struct edac_mce_attr *attr,     \
+                                       char *buf)                      \
+{                                                                      \
+       return sprintf(buf, "0x%016llx\n", i_mce.reg);                  \
+}
+
+MCE_INJECT_SHOW(status);
+MCE_INJECT_SHOW(misc);
+MCE_INJECT_SHOW(addr);
+
+EDAC_MCE_ATTR(status, 0644, edac_inject_status_show, edac_inject_status_store);
+EDAC_MCE_ATTR(misc, 0644, edac_inject_misc_show, edac_inject_misc_store);
+EDAC_MCE_ATTR(addr, 0644, edac_inject_addr_show, edac_inject_addr_store);
+
+/*
+ * This denotes into which bank we're injecting; writing it also
+ * triggers the injection.
+ */
+static ssize_t edac_inject_bank_store(struct kobject *kobj,
+                                     struct edac_mce_attr *attr,
+                                     const char *data, size_t count)
+{
+       int ret = 0;
+       unsigned long value;
+
+       ret = strict_strtoul(data, 10, &value);
+       if (ret < 0) {
+               printk(KERN_ERR "Invalid bank value!\n");
+               return -EINVAL;
+       }
+
+       if (value > 5) {
+               printk(KERN_ERR "Non-existant MCE bank: %lu\n", value);
+               return -EINVAL;
+       }
+
+       i_mce.bank = value;
+
+       amd_decode_mce(NULL, 0, &i_mce);
+
+       return count;
+}
+
+static ssize_t edac_inject_bank_show(struct kobject *kobj,
+                                    struct edac_mce_attr *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", i_mce.bank);
+}
+
+EDAC_MCE_ATTR(bank, 0644, edac_inject_bank_show, edac_inject_bank_store);
+
+static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc,
+                                              &mce_attr_addr, &mce_attr_bank
+};
+
+static int __init edac_init_mce_inject(void)
+{
+       struct sysdev_class *edac_class = NULL;
+       int i, err = 0;
+
+       edac_class = edac_get_sysfs_class();
+       if (!edac_class)
+               return -EINVAL;
+
+       mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj);
+       if (!mce_kobj) {
+               printk(KERN_ERR "Error creating a mce kset.\n");
+               err = -ENOMEM;
+               goto err_mce_kobj;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) {
+               err = sysfs_create_file(mce_kobj, &sysfs_attrs[i]->attr);
+               if (err) {
+                       printk(KERN_ERR "Error creating %s in sysfs.\n",
+                                       sysfs_attrs[i]->attr.name);
+                       goto err_sysfs_create;
+               }
+       }
+       return 0;
+
+err_sysfs_create:
+       while (i--)
+               sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
+
+       kobject_put(mce_kobj);
+
+err_mce_kobj:
+       edac_put_sysfs_class();
+
+       return err;
+}
+
+static void __exit edac_exit_mce_inject(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++)
+               sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
+
+       kobject_put(mce_kobj);
+
+       edac_put_sysfs_class();
+}
+
+module_init(edac_init_mce_inject);
+module_exit(edac_exit_mce_inject);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
+MODULE_AUTHOR("AMD Inc.");
+MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
index be29b0bb247101442a63fa623467e98ad7b77524..9dcb17d51aee737bcbb4ac478807c04311588d6c 100644 (file)
@@ -263,6 +263,7 @@ static const struct {
        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
        {PCI_VENDOR_ID_NEC,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
        {PCI_VENDOR_ID_VIA,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
+       {PCI_VENDOR_ID_RICOH,   PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
        {PCI_VENDOR_ID_APPLE,   PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
 };
 
@@ -2839,7 +2840,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *ent)
 {
        struct fw_ohci *ohci;
-       u32 bus_options, max_receive, link_speed, version, link_enh;
+       u32 bus_options, max_receive, link_speed, version;
        u64 guid;
        int i, err, n_ir, n_it;
        size_t size;
@@ -2893,23 +2894,6 @@ static int __devinit pci_probe(struct pci_dev *dev,
        if (param_quirks)
                ohci->quirks = param_quirks;
 
-       /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
-       if (dev->vendor == PCI_VENDOR_ID_TI) {
-               pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
-
-               /* adjust latency of ATx FIFO: use 1.7 KB threshold */
-               link_enh &= ~TI_LinkEnh_atx_thresh_mask;
-               link_enh |= TI_LinkEnh_atx_thresh_1_7K;
-
-               /* use priority arbitration for asynchronous responses */
-               link_enh |= TI_LinkEnh_enab_unfair;
-
-               /* required for aPhyEnhanceEnable to work */
-               link_enh |= TI_LinkEnh_enab_accel;
-
-               pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
-       }
-
        ar_context_init(&ohci->ar_request_ctx, ohci,
                        OHCI1394_AsReqRcvContextControlSet);
 
index 0e6c5a466908d58156f4fe7dd978c469efb94aad..ef5e7336da68ddf6af413ecd0cc616a812ae1490 100644 (file)
 
 #define OHCI1394_phy_tcode             0xe
 
-/* TI extensions */
-
-#define PCI_CFG_TI_LinkEnh             0xf4
-#define  TI_LinkEnh_enab_accel         0x00000002
-#define  TI_LinkEnh_enab_unfair                0x00000080
-#define  TI_LinkEnh_atx_thresh_mask    0x00003000
-#define  TI_LinkEnh_atx_thresh_1_7K    0x00001000
-
 #endif /* _FIREWIRE_OHCI_H */
index 280c9b5ad9e375afafd3116cc75d8b6fc3eb12fd..88a3ae6cd02306784184fd3f31399da88840defd 100644 (file)
@@ -125,7 +125,7 @@ config ISCSI_IBFT_FIND
 config ISCSI_IBFT
        tristate "iSCSI Boot Firmware Table Attributes module"
        select ISCSI_BOOT_SYSFS
-       depends on ISCSI_IBFT_FIND && SCSI
+       depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
        default n
        help
          This option enables support for detection and exposing of iSCSI
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
index 55d03ed050006c3de3a2b7470c4ededeafbc8871..529a0dbe9fc65960e62bc7840320754fb61393e8 100644 (file)
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
  *   user_data: A pointer the data that is copied to the buffer.
  *   size: The Number of bytes to copy.
  */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
-               void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+                             void __user *user_data, int size)
 {
        int nr_pages = size / PAGE_SIZE + 1;
        int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
 {
        int idx = drm_buffer_index(buf);
        int page = drm_buffer_page(buf);
-       void *obj = 0;
+       void *obj = NULL;
 
        if (idx + objsize <= PAGE_SIZE) {
                obj = &buf->data[page][idx];
index d2ab01e90a96315fee72015f4a76f194211d4a81..dcbeb98f195a7addf665e0134af9ee799280a279 100644 (file)
@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                if (connector->funcs->force)
                        connector->funcs->force(connector);
        } else {
-               connector->status = connector->funcs->detect(connector);
-               drm_helper_hpd_irq_event(dev);
+               connector->status = connector->funcs->detect(connector, true);
+               drm_kms_helper_poll_enable(dev);
        }
 
        if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
 
        if (mode_changed) {
-               old_fb = set->crtc->fb;
-               set->crtc->fb = set->fb;
                set->crtc->enabled = (set->mode != NULL);
                if (set->mode != NULL) {
                        DRM_DEBUG_KMS("attempting to set mode from"
                                        " userspace\n");
                        drm_mode_debug_printmodeline(set->mode);
+                       old_fb = set->crtc->fb;
+                       set->crtc->fb = set->fb;
                        if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
                                                      set->x, set->y,
                                                      old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
                    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
                        continue;
 
-               status = connector->funcs->detect(connector);
+               status = connector->funcs->detect(connector, false);
                if (old_status != status)
                        changed = true;
        }
index bf92d07510df740d856c173e7dcce292659fe6f7..5663d2719063de9231ca6cc153b63b30422e17aa 100644 (file)
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
                return -ENOMEM;
 
        kref_init(&obj->refcount);
-       kref_init(&obj->handlecount);
+       atomic_set(&obj->handle_count, 0);
        obj->size = size;
 
        atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-       struct drm_device *dev = obj->dev;
-
-       if (dev->driver->gem_free_object_unlocked != NULL)
-               dev->driver->gem_free_object_unlocked(obj);
-       else if (dev->driver->gem_free_object != NULL) {
-               mutex_lock(&dev->struct_mutex);
-               dev->driver->gem_free_object(obj);
-               mutex_unlock(&dev->struct_mutex);
-       }
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
        BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = container_of(kref,
-                                                 struct drm_gem_object,
-                                                 handlecount);
        struct drm_device *dev = obj->dev;
 
        /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
 
        drm_gem_object_reference(obj);
+
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
 
-       drm_gem_object_unreference_unlocked(obj);
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_close_locked(vma);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
index 2ef2c78272434dcb6b32dc17fd96e34dd7a8d959..974e970ce3f81ce014170b90ad1b8adc8a1dd5a9 100644 (file)
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
-                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->handle_count),
                   atomic_read(&obj->refcount.refcount));
        return 0;
 }
index e20f78b542a756644a29693c8da5927dd72f5a7d..f5bd9e590c801b50b0d3629ddbadd6c54a69923a 100644 (file)
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        dev->hose = pdev->sysdata;
 #endif
 
+       mutex_lock(&drm_global_mutex);
+
        if ((ret = drm_fill_in_dev(dev, ent, driver))) {
                printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
                goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, pci_name(pdev), dev->primary->index);
 
+       mutex_unlock(&drm_global_mutex);
        return 0;
 
 err_g4:
@@ -210,6 +213,7 @@ err_g2:
        pci_disable_device(pdev);
 err_g1:
        kfree(dev);
+       mutex_unlock(&drm_global_mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
index 460e9a3afa8d4bd43ac752cbf2e1bae3e0f2b832..92d1d0fb7b7581821756ebcdf01089f55037198c 100644 (file)
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
        dev->platformdev = platdev;
        dev->dev = &platdev->dev;
 
+       mutex_lock(&drm_global_mutex);
+
        ret = drm_fill_in_dev(dev, NULL, driver);
 
        if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 
        list_add_tail(&dev->driver_item, &driver->device_list);
 
+       mutex_unlock(&drm_global_mutex);
+
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
                drm_put_minor(&dev->control);
 err_g1:
        kfree(dev);
+       mutex_unlock(&drm_global_mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_get_platform_dev);
index 86118a742231b42fd711f18b64deb182a8d60d3a..85da4c40694cc8a99c2b3224e31af548b76e198a 100644 (file)
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
        struct drm_connector *connector = to_drm_connector(device);
        enum drm_connector_status status;
 
-       status = connector->funcs->detect(connector);
+       status = connector->funcs->detect(connector, true);
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        drm_get_connector_status_name(status));
 }
index fda67468e603b6169393b92bc4922afef8b4d8ce..5df450683aab8649511aaa96aaa759452b022fc0 100644 (file)
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
        mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
 {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);
 
-       mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
                        break;
                }
        }
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
 }
 
index 61b4caf220fa83bd15815ea0f82b627f2d773727..fb07e73581e84467ab59ebe744e21ff6f712d2ce 100644 (file)
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i810_ioctl,
        .mmap = i810_mmap_buffers,
        .fasync = drm_fasync,
 };
index 671aa18415ac52d17164e79b4c2a9f287b02da0d..cc92c7e6236fbdffb86078290b5b93dffad2cbad 100644 (file)
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i830_ioctl,
        .mmap = i830_mmap_buffers,
        .fasync = drm_fasync,
 };
index 9d67b485303005771a090ea7a3c1e1c6f8b74e9e..2dd2c93ebfa35dace7916b38c6df18c835161978 100644 (file)
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
                }
        }
 
-       div_u64(diff, diff1);
+       diff = div_u64(diff, diff1);
        ret = ((m * diff) + c);
-       div_u64(ret, 10);
+       ret = div_u64(ret, 10);
 
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
        /* More magic constants... */
        diff = diff * 1181;
-       div_u64(diff, diffms * 10);
+       diff = div_u64(diff, diffms * 10);
        dev_priv->gfx_power = diff;
 }
 
@@ -2231,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->mchdev_lock = &mchdev_lock;
        spin_unlock(&mchdev_lock);
 
+       /* XXX Prevent module unload due to memory corruption bugs. */
+       __module_get(THIS_MODULE);
+
        return 0;
 
 out_workqueue_free:
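
An editorial note on the div_u64() hunks above: div_u64() returns the
quotient and does not modify its arguments, so calling it purely for side
effects, as the old code did, is a no-op. A minimal sketch:

    #include <linux/math64.h>

    u64 diff = 1000;
    div_u64(diff, 3);        /* return value discarded; diff is still 1000 */
    diff = div_u64(diff, 3); /* diff is now 333 */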
index 216deb579785eb93e27e2ba57a0556471a13daf2..6dbe14cc4f7474aa57221c46fa59286caf362b9f 100644 (file)
@@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = {           /* aka */
        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
+       INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
        INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
        INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
index 16fca1d1799a4211474a91e7fc52b605eceafbfc..90b1d6753b9d493d3ed8d2c45153bf2047b54d8f 100644 (file)
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
                return ret;
        }
 
-       /* Sink the floating reference from kref_init(handlecount) */
-       drm_gem_object_handle_unreference_unlocked(obj);
-
        args->handle = handle;
        return 0;
 }
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check source.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check source.  */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
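
An editorial note on the bounds check above: the old "offset + size >
obj->size" form can wrap around and accept huge sizes, while the new
"size > obj->size - offset" form cannot wrap once offset itself has been
checked. Illustration with 32-bit values:

    u32 obj_size = 0x1000, offset = 0x800;
    u32 size = 0xfffffc00;   /* offset + size wraps around to 0x400 */

    /* old test: offset + size > obj_size -> 0x400 > 0x1000 is false: accepted (!) */
    /* new test: size > obj_size - offset -> 0xfffffc00 > 0x800 is true: rejected  */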
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                                                        file_priv);
        }
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
 
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
-       if (!access_ok(VERIFY_READ, user_data, remain))
-               return -EFAULT;
 
 
        mutex_lock(&dev->struct_mutex);
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check destination.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check destination. */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_READ,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
 
@@ -2351,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        reg->obj = obj;
 
-       if (IS_GEN6(dev))
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                sandybridge_write_fence_reg(reg);
-       else if (IS_I965G(dev))
+               break;
+       case 5:
+       case 4:
                i965_write_fence_reg(reg);
-       else if (IS_I9XX(dev))
+               break;
+       case 3:
                i915_write_fence_reg(reg);
-       else
+               break;
+       case 2:
                i830_write_fence_reg(reg);
+               break;
+       }
 
        trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
                        obj_priv->tiling_mode);
@@ -2381,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg =
                &dev_priv->fence_regs[obj_priv->fence_reg];
+       uint32_t fence_reg;
 
-       if (IS_GEN6(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
                             (obj_priv->fence_reg * 8), 0);
-       } else if (IS_I965G(dev)) {
+               break;
+       case 5:
+       case 4:
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       } else {
-               uint32_t fence_reg;
-
-               if (obj_priv->fence_reg < 8)
-                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+               break;
+       case 3:
+               if (obj_priv->fence_reg >= 8)
+                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
-                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
-                                                      8) * 4;
+       case 2:
+                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
 
                I915_WRITE(fence_reg, 0);
+               break;
        }
 
        reg->obj = NULL;
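
An editorial note on the hunk above: the "case 2:" label lands inside the
else branch of the gen3 if; jumping into the middle of an if statement is
legal C, so gen2 goes straight to the FENCE_REG_830_0 computation. A more
conventional equivalent (sketch):

    case 3:
            if (obj_priv->fence_reg >= 8)
                    fence_reg = FENCE_REG_945_8 +
                                (obj_priv->fence_reg - 8) * 4;
            else
                    fence_reg = FENCE_REG_830_0 +
                                obj_priv->fence_reg * 4;
            I915_WRITE(fence_reg, 0);
            break;
    case 2:
            fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
            I915_WRITE(fence_reg, 0);
            break;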
@@ -3247,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                  (int) reloc->offset,
                                  reloc->read_domains,
                                  reloc->write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
index 72cae3cccad8802641d973542ab6c440445b81db..5c428fa3e0b34049e94786184b646a98ee87c06d 100644 (file)
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
           struct list_head *unwind)
 {
        list_add(&obj_priv->evict_list, unwind);
+       drm_gem_object_reference(&obj_priv->base);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
-       struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+       struct drm_i915_gem_object *obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;
 
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        return -ENOSPC;
 
 found:
+       /* drm_mm doesn't allow any other other operations while
+        * scanning, therefore store to be evicted objects on a
+        * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &unwind_list, evict_list) {
+       while (!list_empty(&unwind_list)) {
+               obj_priv = list_first_entry(&unwind_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-                       /* drm_mm doesn't allow any other other operations while
-                        * scanning, therefore store to be evicted objects on a
-                        * temporary list. */
                        list_move(&obj_priv->evict_list, &eviction_list);
+                       continue;
                }
+               list_del(&obj_priv->evict_list);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
        /* Unbinding will emit any required flushes */
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &eviction_list, evict_list) {
-#if WATCH_LRU
-               DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-               ret = i915_gem_object_unbind(&obj_priv->base);
-               if (ret)
-                       return ret;
+       while (!list_empty(&eviction_list)) {
+               obj_priv = list_first_entry(&eviction_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
+               if (ret == 0)
+                       ret = i915_gem_object_unbind(&obj_priv->base);
+               list_del(&obj_priv->evict_list);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
-       /* The just created free hole should be on the top of the free stack
-        * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-        * Furthermore all accessed data has just recently been used, so it
-        * should be really fast, too. */
-       BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-                                  alignment, 0));
-
-       return 0;
+       return ret;
 }
 
 int
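
For context (editorial sketch): the drm_mm scan API used above requires
that every node added with drm_mm_scan_add_block() be removed again with
drm_mm_scan_remove_block() before the allocator may be used for anything
else, and the remove call returns true for the nodes that form the found
hole, which is why both the unwind and the eviction paths walk their lists
to completion. The scan loop itself follows the usual pattern
(inactive_candidates is a placeholder name):

    list_for_each_entry(obj_priv, &inactive_candidates, evict_list) {
            /* mark_free() returns nonzero once a big-enough hole exists */
            if (mark_free(obj_priv, &unwind_list))
                    goto found;
    }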
index 59457e83b011aa3bbac119faf6836a66df7affa5..744225ebb4b25d5988fab454441de95d7db94115 100644 (file)
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
                i915_seqno_passed(i915_get_gem_seqno(dev,
                                &dev_priv->render_ring),
                        i915_get_tail_request(dev)->seqno)) {
+               bool missed_wakeup = false;
+
                dev_priv->hangcheck_count = 0;
 
                /* Issue a wake-up to catch stuck h/w. */
-               if (dev_priv->render_ring.waiting_gem_seqno |
-                   dev_priv->bsd_ring.waiting_gem_seqno) {
-                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-                       if (dev_priv->render_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       if (dev_priv->bsd_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+               if (dev_priv->render_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+
+               if (dev_priv->bsd_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
                }
+
+               if (missed_wakeup)
+                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
                return;
        }
 
index d094e91292234507c82ea57cb57947a019318aa6..4f5e15577e89e3e6f7005cd92f9f87ab0636b4eb 100644 (file)
 #define  WM1_LP_SR_EN          (1<<31)
 #define  WM1_LP_LATENCY_SHIFT  24
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
+#define  WM1_LP_FBC_LP1_MASK   (0xf<<20)
+#define  WM1_LP_FBC_LP1_SHIFT  20
 #define  WM1_LP_SR_MASK                (0x1ff<<8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0x3f)
+#define WM2_LP_ILK             0x4510c
+#define  WM2_LP_EN             (1<<31)
+#define WM3_LP_ILK             0x45110
+#define  WM3_LP_EN             (1<<31)
+#define WM1S_LP_ILK            0x45120
+#define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define MLTR_ILK               0x11222
index 2c6b98f2440eff4fdee79ba3c7c95a5bbdf07c4a..31f08581e93a46dbdc2ca1cfa563faec31e9ffc5 100644 (file)
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
                dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
        /* Fences */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-       } else {
-               for (i = 0; i < 8; i++)
-                       dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
+               break;
+       case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+       case 2:
+               for (i = 0; i < 8; i++)
+                       dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+               break;
        }
 
        return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
        /* Fences */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
-       } else {
-               for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+               break;
+       case 3:
+       case 2:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+               break;
        }
 
        i915_restore_display(dev);
index 4b7735196cd5a516eb3bb34b9a843450da3f1fe4..197d4f32585a59b5b336328b470fa038bc922781 100644 (file)
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
        if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
                     1000, 1))
-               DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+               DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
        if (turn_off_dac) {
                I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
                              CRT_HOTPLUG_FORCE_DETECT) == 0,
                             1000, 1))
-                       DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+                       DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
        }
 
        stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        return status;
 }
 
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
        if (intel_crt_detect_ddc(encoder))
                return connector_status_connected;
 
+       if (!force)
+               return connector->status;
+
        /* for pre-945g platforms use load detect */
        if (encoder->crtc && encoder->crtc->enabled) {
                status = intel_crt_load_detect(encoder->crtc, intel_encoder);
index 40cc5da264a9bdf5520909ba39ca2cb21c1ca5ca..979228594599a28ac7737762679f1c97fd5981bf 100644 (file)
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
  * @pipe: pipe to wait for
  *
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  * spinning on the vblank interrupt status bit, since we won't actually
  * see an interrupt when the pipe is disabled.
  *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
  */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
-       unsigned long timeout = jiffies + msecs_to_jiffies(100);
-       u32 last_line;
-
-       /* Wait for the display line to settle */
-       do {
-               last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
-               mdelay(5);
-       } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
-                time_after(timeout, jiffies));
-
-       if (time_after(jiffies, timeout))
-               DRM_DEBUG_KMS("vblank wait timed out\n");
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+
+               /* Wait for the Pipe State to go off */
+               if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+                            100, 0))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       } else {
+               u32 last_line;
+               int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+               unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+               /* Wait for the display line to settle */
+               do {
+                       last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+                       mdelay(5);
+               } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+                        time_after(timeout, jiffies));
+               if (time_after(jiffies, timeout))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       }
 }
 
 /* Parameters have changed, update FBC info */
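
Both branches of intel_wait_for_pipe_off() above are the same bounded-poll shape: sample a condition, sleep briefly, give up after a deadline, and sample once more before declaring a timeout so a late success is not reported as a failure. A standalone sketch of that shape using POSIX timing (the names here are illustrative, not the driver's wait_for() macro):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Poll cond() until it holds or timeout_ms elapses. */
static bool wait_for(bool (*cond)(void), unsigned int timeout_ms)
{
        uint64_t deadline = now_ms() + timeout_ms;
        struct timespec pause = { 0, 1000000 };         /* 1 ms between samples */

        while (now_ms() < deadline) {
                if (cond())
                        return true;
                nanosleep(&pause, NULL);
        }
        return cond();  /* final sample: don't turn a late success into a timeout */
}

static bool always_true(void) { return true; }

int main(void) { return wait_for(always_true, 100) ? 0 : 1; }
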
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(dspbase_reg);
                }
 
-               /* Wait for vblank for the disable to take effect */
-               intel_wait_for_vblank_off(dev, pipe);
-
                /* Don't disable pipe A or pipe A PLLs if needed */
                if (pipeconf_reg == PIPEACONF &&
-                   (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+                   (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev, pipe);
                        goto skip_pipe_off;
+               }
 
                /* Next, disable display pipes */
                temp = I915_READ(pipeconf_reg);
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(pipeconf_reg);
                }
 
-               /* Wait for vblank for the disable to take effect. */
-               intel_wait_for_vblank_off(dev, pipe);
+               /* Wait for the pipe to turn off */
+               intel_wait_for_pipe_off(dev, pipe);
 
                temp = I915_READ(dpll_reg);
                if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2463,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
                                  struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = crtc->dev;
+
        if (HAS_PCH_SPLIT(dev)) {
                /* FDI link clock is fixed at 2.7G */
                if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
                        return false;
        }
+
+       /* XXX some encoders set the crtcinfo, others don't.
+        * Obviously we need some form of conflict resolution here...
+        */
+       if (adjusted_mode->crtc_htotal == 0)
+               drm_mode_set_crtcinfo(adjusted_mode, 0);
+
        return true;
 }
 
@@ -2767,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
-       if (wm_size <= 0) {
+       if (wm_size <= 0)
                wm_size = wm->default_wm;
-               DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
-                         " entries required = %ld, available = %lu.\n",
-                         entries_required + wm->guard_size,
-                         wm->fifo_size);
-       }
-
        return wm_size;
 }
 
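The cast in the comparison above is load-bearing: if wm->max_wm is unsigned, comparing a negative wm_size against it would promote wm_size to a huge unsigned value and the clamp would misfire. An illustrative standalone version of the clamp:

#include <stdio.h>

/* Clamp a computed watermark into [1, max_wm], falling back to a default.
 * The (long) cast keeps the comparison signed; without it a negative
 * wm_size would be promoted to unsigned and compare as enormous. */
static long clamp_wm(long wm_size, unsigned long max_wm, long default_wm)
{
        if (wm_size > (long)max_wm)
                wm_size = (long)max_wm;
        if (wm_size <= 0)
                wm_size = default_wm;
        return wm_size;
}

int main(void)
{
        /* prints 8; without the cast the first test would fire and yield 100 */
        printf("%ld\n", clamp_wm(-5, 100, 8));
        return 0;
}
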
@@ -3388,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                reg_value = I915_READ(WM1_LP_ILK);
                reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
                               WM1_LP_CURSOR_MASK);
-               reg_value |= WM1_LP_SR_EN |
-                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+               reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
                             (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
 
                I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                        I915_WRITE(DISP_ARB_CTL,
                                        (I915_READ(DISP_ARB_CTL) |
                                                DISP_FBC_WM_DIS));
+               I915_WRITE(WM3_LP_ILK, 0);
+               I915_WRITE(WM2_LP_ILK, 0);
+               I915_WRITE(WM1_LP_ILK, 0);
                }
                /*
                 * Based on the document from hardware guys the following bits
@@ -5696,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev)
                                   ILK_DPFC_DIS2 |
                                   ILK_CLK_FBC);
                }
-               if (IS_GEN6(dev))
-                       return;
+               return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
                I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                                OUT_RING(MI_FLUSH);
                                ADVANCE_LP_RING();
                        }
-               } else {
+               } else
                        DRM_DEBUG_KMS("Failed to allocate render context."
-                                     "Disable RC6\n");
-                       return;
-               }
+                                      " Disable RC6\n");
        }
 
        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
index 51d142939a26e9abe76fdfe8a90aa94ff0f3b612..9ab8708ac6ba1370cea75680d6a660daa5f9b147 100644 (file)
@@ -1138,18 +1138,14 @@ static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
                        uint8_t dp_train_pat,
-                       uint8_t train_set[4],
-                       bool first)
+                       uint8_t train_set[4])
 {
        struct drm_device *dev = intel_dp->base.enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
        int ret;
 
        I915_WRITE(intel_dp->output_reg, dp_reg_value);
        POSTING_READ(intel_dp->output_reg);
-       if (first)
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        intel_dp_aux_native_write_1(intel_dp,
                                    DP_TRAINING_PATTERN_SET,
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
        uint8_t voltage;
        bool clock_recovery = false;
        bool channel_eq = false;
-       bool first = true;
        int tries;
        u32 reg;
        uint32_t DP = intel_dp->DP;
+       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+       /* Enable output, wait for it to become active */
+       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+       POSTING_READ(intel_dp->output_reg);
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
                        reg = DP | DP_LINK_TRAIN_PAT_1;
 
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_1, train_set, first))
+                                            DP_TRAINING_PATTERN_1, train_set))
                        break;
-               first = false;
                /* Set training pattern 1 */
 
                udelay(100);
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
 
                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_2, train_set,
-                                            false))
+                                            DP_TRAINING_PATTERN_2, train_set))
                        break;
 
                udelay(400);
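
Taken together, these hunks restructure DP link training: the output is enabled and allowed to become active once, up front, instead of on the first call to intel_dp_set_link_train(); training then proceeds in the usual two phases. A compact standalone outline of that sequence (the helpers are stand-ins, not the driver's functions):

#include <stdbool.h>

/* Stand-ins for the hardware/DPCD accessors. */
static int current_pattern;
static void set_training_pattern(int p) { current_pattern = p; }
static bool clock_recovery_ok(void)     { return current_pattern >= 1; }
static bool channel_eq_ok(void)         { return current_pattern >= 2; }
static void enable_output(void)         { }
static void wait_vblank(void)           { }

static bool link_train(void)
{
        int tries;

        enable_output();        /* done once, before any pattern is written */
        wait_vblank();

        /* phase 1: clock recovery on training pattern 1 */
        set_training_pattern(1);
        for (tries = 0; tries < 5 && !clock_recovery_ok(); tries++)
                ;
        if (!clock_recovery_ok())
                return false;

        /* phase 2: channel equalization on training pattern 2 */
        set_training_pattern(2);
        for (tries = 0; tries < 5 && !channel_eq_ok(); tries++)
                ;
        return channel_eq_ok();
}

int main(void) { return link_train() ? 0 : 1; }
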
@@ -1386,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector)
  * \return false if DP port is disconnected.
  */
 static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
index ad312ca6b3e570125732168b3c2f670467264beb..8828b3ac6414eabff93134e34a41ae5c38d1cd34 100644 (file)
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
index a399f4b2c1c526cdfd64aa2d476f6b97c3fd69d2..7c9ec1472d46ab3cbb08f6bffc8257af952a64b0 100644 (file)
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
  *
  * Unimplemented.
  */
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
index 7bdc96256bf55b6e87d102377b428a871792be91..b61966c126d3e3839d33be6c8df2c0170c5d1376 100644 (file)
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
        drm_fb_helper_fini(&ifbdev->helper);
 
        drm_framebuffer_cleanup(&ifb->base);
-       if (ifb->obj)
+       if (ifb->obj) {
                drm_gem_object_unreference(ifb->obj);
+               ifb->obj = NULL;
+       }
 
        return 0;
 }
index ccd4c97e652492f19d85abf532d56aafcab6968a..926934a482ec085c63256567e27f0309b51b24cf 100644 (file)
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 }
 
 static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
index b819c10811470775b19826e407ba75fffd6a9b68..6ec39a86ed06d2bd6e716611f3ab4d384d950636 100644 (file)
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
  * connected and closed means disconnected.  We also send hotplug events as
  * needed, using lid status notification from the input layer.
  */
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * the LID notification event.
         */
        if (connector)
-               connector->status = connector->funcs->detect(connector);
+               connector->status = connector->funcs->detect(connector,
+                                                            false);
+
        /* Don't force modeset on machines where it causes a GPU lockup */
        if (dmi_check_system(intel_no_modeset_on_lid))
                return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
        intel_encoder->crtc_mask = (1 << 1);
-       if (IS_I965G(dev))
-               intel_encoder->crtc_mask |= (1 << 0);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
index e3b7a7ee39cb97b390048c1a5ce9187b94b6e51d..ee73e428a84a800dd8d70a1747457033f7e8da8e 100644 (file)
@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
        if (!analog_connector)
                return false;
 
-       if (analog_connector->funcs->detect(analog_connector) ==
+       if (analog_connector->funcs->detect(analog_connector, false) ==
                        connector_status_disconnected)
                return false;
 
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
        return status;
 }
 
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
        uint16_t response;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
         return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2242,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2521,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                uint16_t response;
        } enhancements;
 
-       if (!intel_sdvo_get_value(intel_sdvo,
-                                 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-                                 &enhancements, sizeof(enhancements)))
-               return false;
-
+       enhancements.response = 0;
+       intel_sdvo_get_value(intel_sdvo,
+                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                            &enhancements, sizeof(enhancements));
        if (enhancements.response == 0) {
                DRM_DEBUG_KMS("No enhancement is supported\n");
                return true;
index c671f60ce80bac917a61c1c60cdb02e692afcb85..4a117e318a73a0a44c7ae4cc0be3447d11a403da 100644 (file)
@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
  * we have a pipe programmed in order to probe the TV.
  */
 static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_display_mode mode;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
 
        if (encoder->crtc && encoder->crtc->enabled) {
                type = intel_tv_detect_type(intel_tv);
-       } else {
+       } else if (force) {
                struct drm_crtc *crtc;
                int dpms_mode;
 
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
                        intel_release_load_detect_pipe(&intel_tv->base, connector,
                                                       dpms_mode);
                } else
-                       type = -1;
-       }
-
-       intel_tv->type = type;
+                       return connector_status_unknown;
+       } else
+               return connector->status;
 
        if (type < 0)
                return connector_status_disconnected;
index a1473fff06ac2d61bd3f629dcc9527be5975f165..fc737037f751c3690dfb09239e3439df1fa4191c 100644 (file)
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 }
 
 static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
 }
 
 static enum drm_connector_status
-nouveau_connector_detect_lvds(struct drm_connector *connector)
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
 
        /* Try retrieving EDID via DDC */
        if (!dev_priv->vbios.fp_no_ddc) {
-               status = nouveau_connector_detect(connector);
+               status = nouveau_connector_detect(connector, force);
                if (status == connector_status_connected)
                        goto out;
        }
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
        if (nv_encoder->dcb->type == OUTPUT_LVDS &&
            (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
             dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
-               nv_connector->native_mode = drm_mode_create(dev);
-               nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+               struct drm_display_mode mode;
+
+               nouveau_bios_fp_mode(dev, &mode);
+               nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
        }
 
        /* Find the native mode if this is a digital panel, if we didn't
index ead7b8fc53fcbcd473dbdc7a97d893a3e2e9c454..19620a6709f55c00e97efd5d2f816705788420f8 100644 (file)
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                goto out;
 
        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-       drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-       if (ret)
-               drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
 }
 
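The ownership rule behind this cleanup (and the matching radeon_gem_create_ioctl change further down): the allocator holds one reference, drm_gem_handle_create() takes its own, so the allocation reference is dropped unconditionally; on success the handle keeps the object alive, on failure the drop frees it. A toy refcount model of the pattern:

#include <assert.h>
#include <stdio.h>

struct obj { int refs; };

static void obj_ref(struct obj *o)   { o->refs++; }
static void obj_unref(struct obj *o) { assert(o->refs > 0); o->refs--; }

/* A handle table entry owns its own reference, like drm_gem_handle_create. */
static int handle_create(struct obj *o, int fail)
{
        if (fail)
                return -1;
        obj_ref(o);
        return 0;
}

static int ioctl_new(struct obj *o, int fail)
{
        int ret = handle_create(o, fail);

        /* drop the allocation reference regardless of the outcome */
        obj_unref(o);
        return ret;
}

int main(void)
{
        struct obj a = { 1 }, b = { 1 };

        ioctl_new(&a, 0);                               /* success path */
        printf("success path refs: %d\n", a.refs);      /* 1: handle's ref */
        ioctl_new(&b, 1);                               /* failure path */
        printf("failure path refs: %d\n", b.refs);      /* 0: object freed */
        return 0;
}
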
index 1bc72c3190a9dcfe9b8f70f16087a231560d9b08..fe359a239df343437cce0b0c79d2692c559ffb9e 100644 (file)
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
 #define SW_I2C_CNTL_WRITE1BIT 6
 
 //==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV                               '01.00'
+#define VESA_OEM_PRODUCT_REV                               "01.00"
 #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT            0xBB       //refer to VBE spec p.32, no TTY support
 #define VESA_MODE_WIN_ATTRIBUTE                                                     7
 #define VESA_WIN_SIZE                                                                                       64
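
The VESA_OEM_PRODUCT_REV fix above is more than cosmetic: in C, '01.00' is a multi-character character constant, an implementation-defined int (and a compiler warning), not the five-byte string the define was meant to be. A small demonstration:

#include <stdio.h>

int main(void)
{
        int bogus = '01.0';             /* multi-char constant: an int with an
                                           implementation-defined value; gcc warns */
        const char *rev = "01.00";      /* what the define actually needs */

        printf("as int: %#x, as string: %s\n", bogus, rev);
        return 0;
}
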
index 464a81a1990f6f274d46bd0535283bb315680305..cd0290f946cff51e8aa8c702dda5e650ec5f9af0 100644 (file)
@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        pll->algo = PLL_ALGO_LEGACY;
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
                                }
-                               /* There is some evidence (often anecdotal) that RV515 LVDS
+                               /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
                                 * (on some boards at least) prefers the legacy algo.  I'm not
                                 * sure whether this should be handled generically or on a
                                 * case-by-case quirk basis.  Both algos should work fine in the
                                 * majority of cases.
                                 */
                                if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
-                                   (rdev->family == CHIP_RV515)) {
+                                   ((rdev->family == CHIP_RV515) ||
+                                    (rdev->family == CHIP_RV620))) {
                                        /* allow the user to override just in case */
                                        if (radeon_new_pll == 1)
                                                pll->algo = PLL_ALGO_NEW;
index b8b7f010b25f8df49e20329932c1735482c03ecf..2f93d46ae69ad58dfb90ea5db02021a402506ae5 100644 (file)
@@ -1137,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
                WREG32(RCU_IND_INDEX, 0x203);
                efuse_straps_3 = RREG32(RCU_IND_DATA);
-               efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
+               efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
 
                switch(efuse_box_bit_127_124) {
                case 0x0:
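
The one-character change in the efuse read above fixes an operator-precedence bug: a cast binds tighter than a shift, so (u8)(x & 0xF0000000) >> 28 truncates the masked value to its low byte (always zero) before shifting, and the fuse field always read back as 0. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t efuse = 0xA0000000;

        /* cast first, then shift: (uint8_t)0xA0000000 is 0, so the result is 0 */
        uint8_t wrong = (uint8_t)(efuse & 0xF0000000) >> 28;

        /* shift first, then cast: extracts the intended nibble, 0xA */
        uint8_t right = (uint8_t)((efuse & 0xF0000000) >> 28);

        printf("wrong=%u right=%u\n", wrong, right);    /* wrong=0 right=10 */
        return 0;
}
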
@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                                                                        EVERGREEN_MAX_BACKENDS_MASK));
                        break;
                }
-       } else
-               gb_backend_map =
-                       evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                              rdev->config.evergreen.max_tile_pipes,
-                                                              rdev->config.evergreen.max_backends,
-                                                              ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                rdev->config.evergreen.max_backends) &
-                                                               EVERGREEN_MAX_BACKENDS_MASK));
+       } else {
+               switch (rdev->family) {
+               case CHIP_CYPRESS:
+               case CHIP_HEMLOCK:
+                       gb_backend_map = 0x66442200;
+                       break;
+               case CHIP_JUNIPER:
+                       gb_backend_map = 0x00006420;
+                       break;
+               default:
+                       gb_backend_map =
+                               evergreen_get_tile_pipe_to_backend_map(rdev,
+                                                                      rdev->config.evergreen.max_tile_pipes,
+                                                                      rdev->config.evergreen.max_backends,
+                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
+                                                                        rdev->config.evergreen.max_backends) &
+                                                                       EVERGREEN_MAX_BACKENDS_MASK));
+               }
+       }
 
        rdev->config.evergreen.tile_config = gb_addr_config;
        WREG32(GB_BACKEND_MAP, gb_backend_map);
@@ -1396,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
@@ -1509,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
        u32 tmp;
 
-       WREG32(CP_INT_CNTL, 0);
+       WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
index e817a0bb5eb4a71550d0c9f6f8697cd80c31914e..e59422320bb6df9873fbf88f9e29d34fdc412110 100644 (file)
@@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
                return r;
        }
        rdev->cp.ready = true;
+       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
 }
 
@@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 void r100_cp_disable(struct radeon_device *rdev)
 {
        /* Disable ring */
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2020,18 +2022,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
                return false;
        }
        elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
-       if (elapsed >= 3000) {
-               /* very likely the improbable case where current
-                * rptr is equal to last recorded, a while ago, rptr
-                * this is more likely a false positive update tracking
-                * information which should force us to be recall at
-                * latter point
-                */
-               lockup->last_cp_rptr = cp->rptr;
-               lockup->last_jiffies = jiffies;
-               return false;
-       }
-       if (elapsed >= 1000) {
+       if (elapsed >= 10000) {
                dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
                return true;
        }
@@ -2306,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
        /* FIXME we don't use the second aperture yet when we could use it */
        if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
                rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
        if (rdev->flags & RADEON_IS_IGP) {
                uint32_t tom;
@@ -3308,13 +3300,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
        unsigned long size;
        unsigned prim_walk;
        unsigned nverts;
+       unsigned num_cb = track->num_cb;
 
-       for (i = 0; i < track->num_cb; i++) {
+       if (!track->zb_cb_clear && !track->color_channel_mask &&
+           !track->blend_read_enable)
+               num_cb = 0;
+
+       for (i = 0; i < num_cb; i++) {
                if (track->cb[i].robj == NULL) {
-                       if (!(track->zb_cb_clear || track->color_channel_mask ||
-                             track->blend_read_enable)) {
-                               continue;
-                       }
                        DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
                        return -EINVAL;
                }
index afc18d87fdca7409e4c7462fe3a1b03eeaa6d3ca..7b65e4efe8af61e2df5404ea52c468fe8ee564db 100644 (file)
@@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r600_vram_gtt_location(rdev, &rdev->mc);
 
        if (rdev->flags & RADEON_IS_IGP) {
@@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  */
 void r600_cp_stop(struct radeon_device *rdev)
 {
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 }
 
@@ -2729,7 +2731,7 @@ int r600_ib_test(struct radeon_device *rdev)
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test succeeded in %u usecs\n", i);
        } else {
-               DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+               DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
@@ -2910,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
 {
        u32 tmp;
 
-       WREG32(CP_INT_CNTL, 0);
+       WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(DxMODE_INT_MASK, 0);
        if (ASIC_IS_DCE3(rdev)) {
@@ -3528,7 +3530,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
        /* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
         */
-       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+           rdev->vram_scratch.ptr) {
                void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
                u32 tmp;
 
index d13622ae74e9799d75045cd282d2797071a23d21..3473c00781ffaaac06cab0c520231a5a66a21111 100644 (file)
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_drm.h"
@@ -507,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev)
        memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
 }
 
@@ -514,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev)
 {
        int r;
 
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
index fdc3b378cbb0d78542987e111fc1693462dc2b1b..f437d36dd98c2f33d195f4fdaaae034c535be6e8 100644 (file)
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
 
 #ifndef R600_BLIT_SHADERS_H
 #define R600_BLIT_SHADERS_H
index d8864949e387a30d838e59cb5eb6d95a4549b1e3..250a3a918193e9821aa6f5836d463553cf6f903d 100644 (file)
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
        /* using get ib will give us the offset into the mipmap bo */
        word0 = radeon_get_ib_value(p, idx + 3) << 8;
        if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
-               dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
-                       w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
-               return -EINVAL;
+               /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+                 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
        }
        return 0;
 }
index a168d644bf9e96724b5e717f2a8777bb8354f5e5..9ff38c99a6ea0e568f2567c0a7ad34b06e60e512 100644 (file)
@@ -344,6 +344,7 @@ struct radeon_mc {
         * about vram size near mc fb location */
        u64                     mc_vram_size;
        u64                     visible_vram_size;
+       u64                     active_vram_size;
        u64                     gtt_size;
        u64                     gtt_start;
        u64                     gtt_end;
index ebae14c4b768b4413990e84c1055782a72590009..8e43ddae70cc27d3c37d472561527432cc51dd53 100644 (file)
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+       if ((dev->pdev->device == 0x796e) &&
+           (dev->pdev->subsystem_vendor == 0x1462) &&
+           (dev->pdev->subsystem_device == 0x7302)) {
+               if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       return false;
+       }
+
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
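
Board quirks like the MSI K9A2GM entry just added and the a-bit f-i90hd one below all match on the same PCI triple of device, subsystem vendor, and subsystem device. The same test in a table-driven sketch (names and table layout are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct board_quirk {
        uint16_t device, sub_vendor, sub_device;
};

/* Boards whose BIOS tables claim outputs that are not physically wired. */
static const struct board_quirk no_dfp_quirks[] = {
        { 0x796e, 0x1462, 0x7302 },     /* MSI K9A2GM V2/V3: no HDMI/DVI */
};

static bool board_quirked(uint16_t dev, uint16_t sv, uint16_t sd)
{
        size_t n = sizeof(no_dfp_quirks) / sizeof(no_dfp_quirks[0]);

        for (size_t i = 0; i < n; i++)
                if (no_dfp_quirks[i].device == dev &&
                    no_dfp_quirks[i].sub_vendor == sv &&
                    no_dfp_quirks[i].sub_device == sd)
                        return true;
        return false;
}

int main(void)
{
        return board_quirked(0x796e, 0x1462, 0x7302) ? 0 : 1;
}
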
@@ -1549,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
                switch (tv_info->ucTV_BootUpDefaultStandard) {
                case ATOM_TV_NTSC:
                        tv_std = TV_STD_NTSC;
-                       DRM_INFO("Default TV standard: NTSC\n");
+                       DRM_DEBUG_KMS("Default TV standard: NTSC\n");
                        break;
                case ATOM_TV_NTSCJ:
                        tv_std = TV_STD_NTSC_J;
-                       DRM_INFO("Default TV standard: NTSC-J\n");
+                       DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
                        break;
                case ATOM_TV_PAL:
                        tv_std = TV_STD_PAL;
-                       DRM_INFO("Default TV standard: PAL\n");
+                       DRM_DEBUG_KMS("Default TV standard: PAL\n");
                        break;
                case ATOM_TV_PALM:
                        tv_std = TV_STD_PAL_M;
-                       DRM_INFO("Default TV standard: PAL-M\n");
+                       DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
                        break;
                case ATOM_TV_PALN:
                        tv_std = TV_STD_PAL_N;
-                       DRM_INFO("Default TV standard: PAL-N\n");
+                       DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
                        break;
                case ATOM_TV_PALCN:
                        tv_std = TV_STD_PAL_CN;
-                       DRM_INFO("Default TV standard: PAL-CN\n");
+                       DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
                        break;
                case ATOM_TV_PAL60:
                        tv_std = TV_STD_PAL_60;
-                       DRM_INFO("Default TV standard: PAL-60\n");
+                       DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
                        break;
                case ATOM_TV_SECAM:
                        tv_std = TV_STD_SECAM;
-                       DRM_INFO("Default TV standard: SECAM\n");
+                       DRM_DEBUG_KMS("Default TV standard: SECAM\n");
                        break;
                default:
                        tv_std = TV_STD_NTSC;
-                       DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+                       DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
                        break;
                }
        }
index bd74e428bd147d0df444cc64858f28acbed0d8ed..7b7ea269549ccef95c1e6c343083071774e7e9de 100644 (file)
@@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
                        switch (RBIOS8(tv_info + 7) & 0xf) {
                        case 1:
                                tv_std = TV_STD_NTSC;
-                               DRM_INFO("Default TV standard: NTSC\n");
+                               DRM_DEBUG_KMS("Default TV standard: NTSC\n");
                                break;
                        case 2:
                                tv_std = TV_STD_PAL;
-                               DRM_INFO("Default TV standard: PAL\n");
+                               DRM_DEBUG_KMS("Default TV standard: PAL\n");
                                break;
                        case 3:
                                tv_std = TV_STD_PAL_M;
-                               DRM_INFO("Default TV standard: PAL-M\n");
+                               DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
                                break;
                        case 4:
                                tv_std = TV_STD_PAL_60;
-                               DRM_INFO("Default TV standard: PAL-60\n");
+                               DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
                                break;
                        case 5:
                                tv_std = TV_STD_NTSC_J;
-                               DRM_INFO("Default TV standard: NTSC-J\n");
+                               DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
                                break;
                        case 6:
                                tv_std = TV_STD_SCART_PAL;
-                               DRM_INFO("Default TV standard: SCART-PAL\n");
+                               DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
                                break;
                        default:
                                tv_std = TV_STD_NTSC;
-                               DRM_INFO
+                               DRM_DEBUG_KMS
                                    ("Unknown TV standard; defaulting to NTSC\n");
                                break;
                        }
 
                        switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
                        case 0:
-                               DRM_INFO("29.498928713 MHz TV ref clk\n");
+                               DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
                                break;
                        case 1:
-                               DRM_INFO("28.636360000 MHz TV ref clk\n");
+                               DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
                                break;
                        case 2:
-                               DRM_INFO("14.318180000 MHz TV ref clk\n");
+                               DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
                                break;
                        case 3:
-                               DRM_INFO("27.000000000 MHz TV ref clk\n");
+                               DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
                                break;
                        default:
                                break;
@@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
 
        if (tmds_info) {
                ver = RBIOS8(tmds_info);
-               DRM_INFO("DFP table revision: %d\n", ver);
+               DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
                if (ver == 3) {
                        n = RBIOS8(tmds_info + 5) + 1;
                        if (n > 4)
@@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
                offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
                if (offset) {
                        ver = RBIOS8(offset);
-                       DRM_INFO("External TMDS Table revision: %d\n", ver);
+                       DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
                        tmds->slave_addr = RBIOS8(offset + 4 + 2);
                        tmds->slave_addr >>= 1; /* 7 bit addressing */
                        gpio = RBIOS8(offset + 4 + 3);
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        /* PowerMac8,1 ? */
                        /* imac g5 isight */
                        rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+               } else if ((rdev->pdev->device == 0x4a48) &&
+                          (rdev->pdev->subsystem_vendor == 0x1002) &&
+                          (rdev->pdev->subsystem_device == 0x4a48)) {
+                       /* Mac X800 */
+                       rdev->mode_info.connector_table = CT_MAC_X800;
                } else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                                            CONNECTOR_OBJECT_ID_VGA,
                                            &hpd);
                break;
+       case CT_MAC_X800:
+               DRM_INFO("Connector Table: %d (mac x800)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI - primary dac, internal tmds */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+               hpd.hpd = RADEON_HPD_1; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP1_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT1_SUPPORT,
+                                                                 1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0,
+                                           ATOM_DEVICE_DFP1_SUPPORT |
+                                           ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+                                           &hpd);
+               /* DVI - tv dac, dvo */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+               hpd.hpd = RADEON_HPD_2; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP2_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP2_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT2_SUPPORT,
+                                                                 2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 1,
+                                           ATOM_DEVICE_DFP2_SUPPORT |
+                                           ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+                                           &hpd);
+               break;
        default:
                DRM_INFO("Connector table: %d (invalid)\n",
                         rdev->mode_info.connector_table);
index a9dd7847d96ed673e4548efc343e9112390e7c75..ecc1a8fafbfd3eb3c12c0c4d45b4b091a1bee03b 100644 (file)
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder;
        struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
  * we have to check if this analog encoder is shared with anyone else (TV)
  * if its shared we have to set the other connector to disconnected.
  */
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enum drm_connector_status ret = connector_status_disconnected;
index 5731fc9b1ae3ae9274188a5bf8cdae7aa78f3b33..3eef567b0421ae71826abd77ac3bc035a5ec1c33 100644 (file)
@@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;
+       int w = radeon_crtc->cursor_width;
 
        if (x < 0)
                xorigin = -x + 1;
@@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        if (yorigin >= CURSOR_HEIGHT)
                yorigin = CURSOR_HEIGHT - 1;
 
-       radeon_lock_cursor(crtc, true);
-       if (ASIC_IS_DCE4(rdev)) {
-               /* cursors are offset into the total surface */
-               x += crtc->x;
-               y += crtc->y;
-               DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
-               /* XXX: check if evergreen has the same issues as avivo chips */
-               WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
-                      ((xorigin ? 0 : x) << 16) |
-                      (yorigin ? 0 : y));
-               WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
-               WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
-                      ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
-       } else if (ASIC_IS_AVIVO(rdev)) {
-               int w = radeon_crtc->cursor_width;
+       if (ASIC_IS_AVIVO(rdev)) {
                int i = 0;
                struct drm_crtc *crtc_p;
 
@@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                        if (w <= 0)
                                w = 1;
                }
+       }
 
+       radeon_lock_cursor(crtc, true);
+       if (ASIC_IS_DCE4(rdev)) {
+               WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+                      ((xorigin ? 0 : x) << 16) |
+                      (yorigin ? 0 : y));
+               WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+               WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+                      ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+       } else if (ASIC_IS_AVIVO(rdev)) {
                WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
                             ((xorigin ? 0 : x) << 16) |
                             (yorigin ? 0 : y));
index 6dd434ad2429b9d9689ed861d33a2b1c0b09f092..b92d2f2fcbed6a8bd472ce9f4b936aa82f309278 100644 (file)
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                        DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_DFP5_SUPPORT)
                                        DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+                                       DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_TV1_SUPPORT)
                                        DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-       if (radeon_fb->obj)
+       if (radeon_fb->obj) {
                drm_gem_object_unreference_unlocked(radeon_fb->obj);
+       }
        drm_framebuffer_cleanup(fb);
        kfree(radeon_fb);
 }
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                radeon_crtc->rmx_type = radeon_encoder->rmx_type;
                        else
                                radeon_crtc->rmx_type = RMX_OFF;
-                       src_v = crtc->mode.vdisplay;
-                       dst_v = radeon_crtc->native_mode.vdisplay;
-                       src_h = crtc->mode.hdisplay;
-                       dst_h = radeon_crtc->native_mode.vdisplay;
                        /* copy native mode */
                        memcpy(&radeon_crtc->native_mode,
                               &radeon_encoder->native_mode,
                                sizeof(struct drm_display_mode));
+                       src_v = crtc->mode.vdisplay;
+                       dst_v = radeon_crtc->native_mode.vdisplay;
+                       src_h = crtc->mode.hdisplay;
+                       dst_h = radeon_crtc->native_mode.hdisplay;
 
                        /* fix up for overscan on hdmi */
                        if (ASIC_IS_AVIVO(rdev) &&
+                           (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
                            ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
                             ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
                              drm_detect_hdmi_monitor(radeon_connector->edid) &&
index c74a8b20d9413e921bc6a03cd92578146a9ee8ef..40b0c087b5921384d46bf7f745cf93f2b391015b 100644 (file)
@@ -94,6 +94,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
        ret = radeon_bo_reserve(rbo, false);
        if (likely(ret == 0)) {
                radeon_bo_kunmap(rbo);
+               radeon_bo_unpin(rbo);
                radeon_bo_unreserve(rbo);
        }
        drm_gem_object_unreference_unlocked(gobj);
@@ -325,8 +326,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 {
        struct fb_info *info;
        struct radeon_framebuffer *rfb = &rfbdev->rfb;
-       struct radeon_bo *rbo;
-       int r;
 
        if (rfbdev->helper.fbdev) {
                info = rfbdev->helper.fbdev;
@@ -338,14 +337,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
        }
 
        if (rfb->obj) {
-               rbo = rfb->obj->driver_private;
-               r = radeon_bo_reserve(rbo, false);
-               if (likely(r == 0)) {
-                       radeon_bo_kunmap(rbo);
-                       radeon_bo_unpin(rbo);
-                       radeon_bo_unreserve(rbo);
-               }
-               drm_gem_object_unreference_unlocked(rfb->obj);
+               radeonfb_destroy_pinned_object(rfb->obj);
+               rfb->obj = NULL;
        }
        drm_fb_helper_fini(&rfbdev->helper);
        drm_framebuffer_cleanup(&rfb->base);
index c578f265b24cefc6dce21734783b01c1aed1ce27..d1e595d9172396b8104d19c7a1d0a87d3b14b772 100644 (file)
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(gobj);
        if (r) {
-               drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
-       drm_gem_object_handle_unreference_unlocked(gobj);
        args->handle = handle;
        return 0;
 }
index 5eee3c41d124bf49fbd5dfbc7264fb062699e961..8fbbe1c6ebbda854f7bf9dc9f76ae1c4eefdafc5 100644 (file)
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (rdev->powered_down)
+               return -EINVAL;
        return 0;
 }
 
index efbe975312dc42342c2add89c417b753b0e92791..17a6602b5885786fecd6dd3231940a7b5d3c4261 100644 (file)
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
 
 /* mostly for macs, but really any system without connector tables */
 enum radeon_connector_table {
-       CT_NONE,
+       CT_NONE = 0,
        CT_GENERIC,
        CT_IBOOK,
        CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
        CT_IMAC_G5_ISIGHT,
        CT_EMAC,
        CT_RN50_POWER,
+       CT_MAC_X800,
 };
 
 enum radeon_dvo_chip {
index 0afd1e62347dcfb9670d20e13a818d8d7a99b59c..b3b5306bb578bf88547e4078fe48f59d9e0ea720 100644 (file)
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        u32 c = 0;
 
        rbo->placement.fpfn = 0;
-       rbo->placement.lpfn = 0;
+       rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
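
This placement change is where the new active_vram_size field pays off: lpfn is the last page frame a buffer may occupy, so while the CP/blitter is down (active equals visible) buffers cannot be placed in VRAM the CPU cannot reach. The page-frame arithmetic, as a standalone sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */

int main(void)
{
        uint64_t visible_vram = 256ull << 20;   /* CPU-visible aperture */
        uint64_t real_vram    = 1024ull << 20;  /* total on-board VRAM */

        /* ring down: only CPU-reachable VRAM is active */
        uint64_t lpfn = visible_vram >> PAGE_SHIFT;
        printf("cp stopped: place below pfn %llu\n", (unsigned long long)lpfn);

        /* ring up: the whole aperture becomes active again */
        lpfn = real_vram >> PAGE_SHIFT;
        printf("cp running: place below pfn %llu\n", (unsigned long long)lpfn);
        return 0;
}
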
index 353998dc2c03b12992cd244ff116d01e45db2b96..3481bc7f6f582b08a0c2a9ff079fa9787defb6cd 100644 (file)
@@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
        int r;
 
        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
-       if (unlikely(r != 0)) {
-               if (r != -ERESTARTSYS)
-                       dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+       if (unlikely(r != 0))
                return r;
-       }
        spin_lock(&bo->tbo.lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
index cc05b230d7effbbae88524da0d698dace6228ccf..51d5f7b5ab21b40a6e34d2fd286f28da91e4f0b0 100644 (file)
@@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev)
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        base = RREG32_MC(R_000004_MC_FB_LOCATION);
        base = G_000004_MC_FB_START(base) << 16;
index 3e3f75718be3e83ab156465dc80a11f604b58a64..4dc2a87ea68018f0292cc0724d6ef4868c00e8ac 100644 (file)
@@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
index bfa59db374d23d3c4a06877a6e9a37aec59904e0..9490da700749487c00fe9c57671ec89727653136 100644 (file)
@@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 }
 
@@ -992,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev)
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        r600_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
 
index cb4cf7ef4d1eee9bc726c4d4ee34f8962526b316..db809e034cc48b6d8c246cba1ede0660113e30de 100644 (file)
@@ -441,6 +441,43 @@ out_err:
        return ret;
 }
 
+/**
+ * Call with bo::reserved held and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver-specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_global *glob = bo->glob;
+
+       if (bo->ttm) {
+
+               /**
+                * Release the lru_lock, since we don't want to have
+                * an atomic requirement on ttm_tt[unbind|destroy].
+                */
+
+               spin_unlock(&glob->lru_lock);
+               ttm_tt_unbind(bo->ttm);
+               ttm_tt_destroy(bo->ttm);
+               bo->ttm = NULL;
+               spin_lock(&glob->lru_lock);
+       }
+
+       if (bo->mem.mm_node) {
+               drm_mm_put_block(bo->mem.mm_node);
+               bo->mem.mm_node = NULL;
+       }
+
+       atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
+       spin_unlock(&glob->lru_lock);
+}
+
+
 /**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
        int ret;
 
        spin_lock(&bo->lock);
+retry:
        (void) ttm_bo_wait(bo, false, false, !remove_all);
 
        if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                spin_unlock(&bo->lock);
 
                spin_lock(&glob->lru_lock);
-               put_count = ttm_bo_del_from_lru(bo);
+               ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+               /**
+                * Someone else has the object reserved. Bail and retry.
+                */
 
-               ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-               BUG_ON(ret);
-               if (bo->ttm)
-                       ttm_tt_unbind(bo->ttm);
+               if (unlikely(ret == -EBUSY)) {
+                       spin_unlock(&glob->lru_lock);
+                       spin_lock(&bo->lock);
+                       goto requeue;
+               }
+
+               /**
+                * We can re-check for a sync object without taking
+                * the bo::lock, since setting the sync object also
+                * requires bo::reserved. A busy object at this point may
+                * be caused by another thread starting an accelerated
+                * eviction.
+                */
+
+               if (unlikely(bo->sync_obj)) {
+                       atomic_set(&bo->reserved, 0);
+                       wake_up_all(&bo->event_queue);
+                       spin_unlock(&glob->lru_lock);
+                       spin_lock(&bo->lock);
+                       if (remove_all)
+                               goto retry;
+                       else
+                               goto requeue;
+               }
+
+               put_count = ttm_bo_del_from_lru(bo);
 
                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }
-               if (bo->mem.mm_node) {
-                       drm_mm_put_block(bo->mem.mm_node);
-                       bo->mem.mm_node = NULL;
-               }
-               spin_unlock(&glob->lru_lock);
 
-               atomic_set(&bo->reserved, 0);
+               ttm_bo_cleanup_memtype_use(bo);
 
                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
                return 0;
        }
-
+requeue:
        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
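
The rework above replaces the BUG_ON on a failed reservation with a back-off-and-retry, and re-checks the busy condition once the reservation is actually held. A compressed pthread analogy of that control flow (illustrative names; the real code uses TTM's atomic reservation and kernel spinlocks, not mutexes):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct bo {
        pthread_mutex_t reserve;           /* stands in for bo::reserved */
        bool busy;                         /* stands in for bo->sync_obj */
};

static int cleanup_refs(struct bo *bo)
{
retry:
        pthread_mutex_lock(&lru_lock);

        /* Equivalent of ttm_bo_reserve_locked() returning -EBUSY:
         * someone else holds the reservation, so bail and retry
         * instead of BUG()ing. */
        if (pthread_mutex_trylock(&bo->reserve) != 0) {
                pthread_mutex_unlock(&lru_lock);
                goto retry;
        }

        /* Re-check the busy condition now that the reservation is
         * held; another thread may have started an eviction between
         * the first check and the reserve. */
        if (bo->busy) {
                pthread_mutex_unlock(&bo->reserve);
                pthread_mutex_unlock(&lru_lock);
                return -1;                 /* requeue for later */
        }

        /* ... safe to tear down memtype use here ... */
        pthread_mutex_unlock(&bo->reserve);
        pthread_mutex_unlock(&lru_lock);
        return 0;
}

int main(void)
{
        struct bo b = { .reserve = PTHREAD_MUTEX_INITIALIZER, .busy = false };

        printf("cleanup -> %d\n", cleanup_refs(&b));
        return 0;
}
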
index 7cffb3e0423249ec4f78f7c7cbdc72b6df921e50..3451a82adba76c31672ee96f086146f5da1ab12b 100644 (file)
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
+       atomic_set(&fbo->cpu_writers, 0);
 
        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
index ca904799f018a6e3ae23c80933bd1e007aec8f7f..b1e02fffd3ccdebf256d38bb55bed9a37ea1c8d7 100644 (file)
@@ -69,7 +69,7 @@ struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
-       int                     gfp_flags;
+       gfp_t                   gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * This function is reentrant if caller updates count depending on number of
  * pages returned in pages array.
  */
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
 {
        struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
-       int gfp_flags = GFP_USER;
+       gfp_t gfp_flags = GFP_USER;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
        return 0;
 }
 
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
 {
        int i;
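
Two portable-C points sit in the ttm_page_alloc.c hunks above: flag words move from plain int to the dedicated gfp_t (which the kernel marks __bitwise so sparse catches mixing), and ttm_page_alloc_fini() gains an explicit (void), since a C definition written f() accepts unspecified arguments. A standalone sketch of both, with stand-in names since gfp_t itself lives in kernel headers:

#include <stdio.h>

typedef unsigned int gfp_like_t;           /* stands in for gfp_t */
#define GFP_USER_LIKE 0x1u
#define GFP_ZERO_LIKE 0x2u

static int alloc_new_pages(gfp_like_t gfp_flags, unsigned count)
{
        printf("allocating %u pages with flags 0x%x\n", count, gfp_flags);
        return 0;
}

static void page_alloc_fini(void)          /* (void), not () */
{
        printf("pool torn down\n");
}

int main(void)
{
        gfp_like_t flags = GFP_USER_LIKE | GFP_ZERO_LIKE;

        alloc_new_pages(flags, 8);
        page_alloc_fini();
        return 0;
}
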
 
index 72ec2e2b6e9787196ca1de65f28e4c6a0f090051..a96ed6d9d010b82cfc58ed41ec6240f99d5a9103 100644 (file)
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
        {0, 0, 0}
 };
 
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);
 
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
 static void vmw_print_capabilities(uint32_t capabilities)
 {
        DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 {
        int ret;
 
-       vmw_kms_save_vga(dev_priv);
-
        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
-       vmw_kms_restore_vga(dev_priv);
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+       int ret = 0;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+               ret = vmw_request_device(dev_priv);
+               if (unlikely(ret != 0))
+                       --dev_priv->num_3d_resources;
+       }
+       mutex_unlock(&dev_priv->release_mutex);
+       return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+       int32_t n3d;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(--dev_priv->num_3d_resources == 0))
+               vmw_release_device(dev_priv);
+       n3d = (int32_t) dev_priv->num_3d_resources;
+       mutex_unlock(&dev_priv->release_mutex);
+
+       BUG_ON(n3d < 0);
+}
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->last_read_sequence = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->enable_fb = enable_fbdev;
+
        mutex_lock(&dev_priv->hw_mutex);
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev->dev_private = dev_priv;
 
-       if (!dev->devname)
-               dev->devname = vmw_devname;
-
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-               ret = drm_irq_install(dev);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed installing irq: %d\n", ret);
-                       goto out_no_irq;
-               }
-       }
-
        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        goto out_no_device;
                }
        }
-       ret = vmw_request_device(dev_priv);
+       ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
-               goto out_no_device;
-       vmw_kms_init(dev_priv);
+               goto out_no_kms;
        vmw_overlay_init(dev_priv);
-       vmw_fb_init(dev_priv);
+       if (dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       goto out_no_fifo;
+               vmw_kms_save_vga(dev_priv);
+               vmw_fb_init(dev_priv);
+               DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+                        "Detected device 3D availability.\n" :
+                        "Detected no device 3D availability.\n");
+       } else {
+               DRM_INFO("Delayed 3D detection since we're not "
+                        "running the device in SVGA mode yet.\n");
+       }
+
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+               ret = drm_irq_install(dev);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed installing irq: %d\n", ret);
+                       goto out_no_irq;
+               }
+       }
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
 
-       DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
        return 0;
 
-out_no_device:
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
 out_no_irq:
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+out_no_fifo:
+       vmw_overlay_close(dev_priv);
+       vmw_kms_close(dev_priv);
+out_no_kms:
+       if (dev_priv->stealth)
+               pci_release_region(dev->pdev, 2);
+       else
+               pci_release_regions(dev->pdev);
+out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
        iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       vmw_fb_close(dev_priv);
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
-       vmw_release_device(dev_priv);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
 
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-               if (unlikely(ioctl->cmd != cmd)) {
+               if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
 
+       if (!dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       return ret;
+               vmw_kms_save_vga(dev_priv);
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+               mutex_unlock(&dev_priv->hw_mutex);
+       }
+
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
        return 0;
 
 out_no_active_lock:
-       vmw_release_device(dev_priv);
+       if (!dev_priv->enable_fb) {
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        return ret;
 }
 
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
+       if (!dev_priv->enable_fb) {
+               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+               if (unlikely(ret != 0))
+                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       vmw_fb_on(dev_priv);
+       if (dev_priv->enable_fb)
+               vmw_fb_on(dev_priv);
 }
 
 
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
+       .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
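
The vmw_3d_resource_inc()/vmw_3d_resource_dec() pair above is a mutex-protected 0->1/1->0 counter: the first user brings the SVGA device up, the last one takes it down, which is what lets fbdev become optional. A userspace analogy of that pattern (illustrative names, not the vmwgfx code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned num_3d_resources;

static int request_device(void)  { printf("device up\n");   return 0; }
static void release_device(void) { printf("device down\n"); }

static int resource_inc(void)
{
        int ret = 0;

        pthread_mutex_lock(&release_mutex);
        if (num_3d_resources++ == 0) {
                ret = request_device();
                if (ret != 0)
                        --num_3d_resources;    /* undo on failure */
        }
        pthread_mutex_unlock(&release_mutex);
        return ret;
}

static void resource_dec(void)
{
        pthread_mutex_lock(&release_mutex);
        if (--num_3d_resources == 0)
                release_device();
        pthread_mutex_unlock(&release_mutex);
}

int main(void)
{
        resource_inc();    /* first user: device comes up */
        resource_inc();    /* second user: no-op */
        resource_dec();
        resource_dec();    /* last user: device goes down */
        return 0;
}
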
index 429f917b60bf4b30ecdfd0946f9f1f0978bd5c58..58de6393f611dd79fbbebeab81102dcf0f4abfcb 100644 (file)
@@ -277,6 +277,7 @@ struct vmw_private {
 
        bool stealth;
        bool is_opened;
+       bool enable_fb;
 
        /**
         * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
+
+       struct mutex release_mutex;
+       uint32_t num_3d_resources;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
        return val;
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
  * Overlay control - vmwgfx_overlay.c
index 870967a97c15d52eb3f380323e6038d32ed6e76f..409e172f4abfe94502be96502e251b5d6b2e54c9 100644 (file)
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.mm_node->start < bo->num_pages)
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+                                      false, false);
+
        ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 
        /* Could probably bug on */
index e6a1eb7ea95498f00e65d123adeae79160af8fa8..0fe31766e4cf5f11e6025e5a85f96baa5936408f 100644 (file)
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
        min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
+       vmw_write(dev_priv, SVGA_REG_TRACES,
+                 dev_priv->traces_state);
 
        mutex_unlock(&dev_priv->hw_mutex);
        vmw_fence_queue_takedown(&fifo->fence_queue);
index 64d7f47df8683ef49cfbb3c83eb449026949af03..e882ba099f0c33dab30f12f7b9328b3b628ac712 100644 (file)
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
                save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
                save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+               if (i == 0 && vmw_priv->num_displays == 1 &&
+                   save->width == 0 && save->height == 0) {
+
+                       /*
+                        * It should be fairly safe to assume that these
+                        * values are uninitialized.
+                        */
+
+                       save->width = vmw_priv->vga_width - save->pos_x;
+                       save->height = vmw_priv->vga_height - save->pos_y;
+               }
        }
+
        return 0;
 }
 
@@ -984,3 +996,8 @@ out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
 }
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+       return 0;
+}
index 2ff5cf78235f35379cf409b3ec659774124a0757..11cb39e3accbfa9581801095ab0398952d2313f1 100644 (file)
@@ -27,6 +27,8 @@
 
 #include "vmwgfx_kms.h"
 
+#define VMWGFX_LDU_NUM_DU 8
+
 #define vmw_crtc_to_ldu(x) \
        container_of(x, struct vmw_legacy_display_unit, base.crtc)
 #define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
 }
 
 static enum drm_connector_status
-       vmw_ldu_connector_detect(struct drm_connector *connector)
+       vmw_ldu_connector_detect(struct drm_connector *connector,
+                                bool force)
 {
        if (vmw_connector_to_ldu(connector)->pref_active)
                return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
-       connector->status = vmw_ldu_connector_detect(connector);
+       connector->status = vmw_ldu_connector_detect(connector, true);
 
        drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+       int i;
+       int ret;
+
        if (dev_priv->ldu_priv) {
                DRM_INFO("ldu system already on\n");
                return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
        drm_mode_create_dirty_info_property(dev_priv->dev);
 
-       vmw_ldu_init(dev_priv, 0);
-       /* for old hardware without multimon only enable one display */
        if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_ldu_init(dev_priv, 1);
-               vmw_ldu_init(dev_priv, 2);
-               vmw_ldu_init(dev_priv, 3);
-               vmw_ldu_init(dev_priv, 4);
-               vmw_ldu_init(dev_priv, 5);
-               vmw_ldu_init(dev_priv, 6);
-               vmw_ldu_init(dev_priv, 7);
+               for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+                       vmw_ldu_init(dev_priv, i);
+               ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+       } else {
+               /* for old hardware without multimon only enable one display */
+               vmw_ldu_init(dev_priv, 0);
+               ret = drm_vblank_init(dev, 1);
        }
 
-       return 0;
+       return ret;
 }
 
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       drm_vblank_cleanup(dev);
        if (!dev_priv->ldu_priv)
                return -ENOSYS;
 
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
                        ldu->pref_height = 600;
                        ldu->pref_active = false;
                }
-               con->status = vmw_ldu_connector_detect(con);
+               con->status = vmw_ldu_connector_detect(con, true);
        }
 
        mutex_unlock(&dev->mode_config.mutex);
index 5f2d5df01e5c370acbc1be99772701626419daf5..c8c40e9979dbd21442a5d87cf6ac4cdcad08cfad 100644 (file)
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
 }
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        cmd->body.sid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
        }
 
        vmw_fifo_commit(dev_priv, submit_size);
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
 }
index b87569e96b163c04fb35790ef8c457999480e3f3..f366f968155a3ed913ce770a60ca30cbf2f97981 100644 (file)
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
        pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
 {
        struct vga_device *vgadev;
        unsigned long flags;
index 4267a6fdc277a183cc840342fb5508cadda90815..5925bdcd417dbbf74d878b52fedcbc24a4d4de31 100644 (file)
@@ -237,6 +237,8 @@ static const struct hid_device_id cando_devices[] = {
                        USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
                        USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+               USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, cando_devices);
index 0c52899be9643d85af5c7ac6be8c87bc67d6e6ce..a0dea3d1296e65ebc9b84e24ec1aeacd69aa59d7 100644 (file)
@@ -1285,10 +1285,14 @@ static const struct hid_device_id hid_blacklist[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
@@ -1578,7 +1582,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
        { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
index 85c6d13c9ffa9369fca613eda828133b2b69a3e2..c5ae5f1545bd0a18d516edab6f04b3c18d0fca71 100644 (file)
 
 #define USB_VENDOR_ID_ASUS             0x0486
 #define USB_DEVICE_ID_ASUS_T91MT       0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO   0x0186
 
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 
 #define USB_VENDOR_ID_BTC              0x046e
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE        0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2      0x5577
 
 #define USB_VENDOR_ID_CANDO            0x2087
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH        0x0a01
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
 
 #define USB_VENDOR_ID_CH               0x068e
 #define USB_DEVICE_ID_CH_PRO_PEDALS    0x00f2
 
 #define USB_VENDOR_ID_CHICONY          0x04f2
 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD     0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 
 #define USB_VENDOR_ID_CIDC             0x1677
 
 
 #define USB_VENDOR_ID_TURBOX           0x062a
 #define USB_DEVICE_ID_TURBOX_KEYBOARD  0x0201
+#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART        0x7100
 
 #define USB_VENDOR_ID_TWINHAN          0x6253
 #define USB_DEVICE_ID_TWINHAN_IR_REMOTE        0x0100
 #define USB_VENDOR_ID_UCLOGIC          0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209    0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U   0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5      0x6001
 
 #define USB_VENDOR_ID_VERNIER          0x08f7
 #define USB_DEVICE_ID_VERNIER_LABPRO   0x0001
index e91437c189061cf7c862a74b3b6054f369528af7..ac5421d568f151cd6937f33244740d9f9759fedb 100644 (file)
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
 
 static const struct hid_device_id mosart_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);
index 5771f851f85693a46bd581085e25e09775fbcae5..956ed9ac19d4dee7fc3b259b1453662ec0bd8146 100644 (file)
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 static const struct hid_device_id ts_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
        { }
 };
index 47d70c523d93474a658bbaa5aa5b1cfb327f194b..a3866b5c0c43da58bceae7bb463a2780846454be 100644 (file)
@@ -109,6 +109,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
        int ret = 0;
 
        mutex_lock(&minors_lock);
+
+       if (!hidraw_table[minor]) {
+               ret = -ENODEV;
+               goto out;
+       }
+
        dev = hidraw_table[minor]->hid;
 
        if (!dev->hid_output_raw_report) {
@@ -244,6 +250,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
        mutex_lock(&minors_lock);
        dev = hidraw_table[minor];
+       if (!dev) {
+               ret = -ENODEV;
+               goto out;
+       }
 
        switch (cmd) {
                case HIDIOCGRDESCSIZE:
@@ -317,6 +327,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
 
                ret = -ENOTTY;
        }
+out:
        mutex_unlock(&minors_lock);
        return ret;
 }
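
Both hidraw hunks add the same guard: a minor-indexed table entry can disappear on disconnect, so it must be checked under minors_lock before dereferencing, returning -ENODEV instead of oopsing. The shape of that guard as a userspace analogy (illustrative names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MINORS 4

struct dev { const char *name; };

static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *table[MINORS];

static int dev_write(int minor)
{
        int ret = 0;

        pthread_mutex_lock(&minors_lock);
        if (!table[minor]) {               /* device already unplugged */
                ret = -ENODEV;
                goto out;
        }
        printf("writing to %s\n", table[minor]->name);
out:
        pthread_mutex_unlock(&minors_lock);
        return ret;
}

int main(void)
{
        static struct dev d = { "hidraw0" };

        table[0] = &d;
        printf("%d\n", dev_write(0));      /* 0 */
        table[0] = NULL;                   /* simulate disconnect */
        printf("%d\n", dev_write(0));      /* -ENODEV */
        return 0;
}
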
index b729c02866798c00ae4d2c77eadba097a79d7f73..599041a7f670a9f105e00da3272d79642aefb78c 100644 (file)
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
                }
        } else {
                int skipped_report_id = 0;
+               int report_id = buf[0];
                if (buf[0] == 0x0) {
                        /* Don't send the Report ID */
                        buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
                ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                        HID_REQ_SET_REPORT,
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       ((report_type + 1) << 8) | *buf,
+                       ((report_type + 1) << 8) | report_id,
                        interface->desc.bInterfaceNumber, buf, count,
                        USB_CTRL_SET_TIMEOUT);
                /* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
        { }
 };
 
+struct usb_interface *usbhid_find_interface(int minor)
+{
+       return usb_find_interface(&hid_driver, minor);
+}
+
 static struct hid_driver hid_usb_driver = {
        .name = "generic-usb",
        .id_table = hid_usb_table,
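
The output_raw_report fix above captures buf[0] before the unnumbered-report path advances the buffer past the ID byte; the old code's *buf in the control-message value field read payload data instead of the report ID. A standalone illustration:

#include <stddef.h>
#include <stdio.h>

static void send_report(const unsigned char *buf, size_t count)
{
        int report_id = buf[0];            /* capture before advancing */

        if (buf[0] == 0x0) {               /* unnumbered report: skip ID */
                buf++;
                count--;
        }
        /* wValue would use report_id here, not *buf, which now points
         * at the first payload byte. */
        printf("id=%d first payload byte=0x%02x len=%zu\n",
               report_id, buf[0], count);
}

int main(void)
{
        unsigned char numbered[]   = { 0x05, 0xAA, 0xBB };
        unsigned char unnumbered[] = { 0x00, 0xCC, 0xDD };

        send_report(numbered, sizeof(numbered));
        send_report(unnumbered, sizeof(unnumbered));
        return 0;
}
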
index 2643d31476213cd41f5d1b39042e83d5a70a63a4..f0260c699adb45ac9d5b11f119c1491a95e04504 100644 (file)
@@ -33,8 +33,10 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+       { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +71,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
@@ -77,6 +80,8 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
 
+       { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
        { 0, 0 }
 };
 
index 0a29c51114aaf0d36c64f6c8c25195db5e2f3747..681e620eb95b166ed43ac48c082960ba0f39e211 100644 (file)
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
        struct hiddev *hiddev;
        int res;
 
-       intf = usb_find_interface(&hiddev_driver, iminor(inode));
+       intf = usbhid_find_interface(iminor(inode));
        if (!intf)
                return -ENODEV;
        hid = usb_get_intfdata(intf);
index 693fd3e720df41c8ded8ccf15f9575648de77c96..89d2e847dcc671b65f15484b3df34f6c4a6c288d 100644 (file)
@@ -42,6 +42,7 @@ void usbhid_submit_report
 (struct hid_device *hid, struct hid_report *report, unsigned char dir);
 int usbhid_get_power(struct hid_device *hid);
 void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
 
 /* iofl flags */
 #define HID_CTRL_RUNNING       1
index 4d4d09bdec0a7a7cb043725b2fb94dc1fff23dc7..97499d00615aacbddcf5963bcfa08b5f4733a1bb 100644 (file)
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
 
 config SENSORS_PKGTEMP
        tristate "Intel processor package temperature sensor"
-       depends on X86 && PCI && EXPERIMENTAL
+       depends on X86 && EXPERIMENTAL
        help
          If you say yes here you get support for the package level temperature
          sensor inside your CPU. Check documentation/driver for details.
index 15c1a9616af33ba13ae8a79823cb7d30729c42fe..0683e6be662cfe28e803cac2b6ddd6501f06ef29 100644 (file)
@@ -79,7 +79,7 @@ struct adm1031_data {
        int chip_type;
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
-       unsigned int update_rate;       /* In milliseconds */
+       unsigned int update_interval;   /* In milliseconds */
        /* The chan_select_table contains the possible configurations for
         * auto fan control.
         */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
 static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
 
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,
 };
 
-static ssize_t show_update_rate(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "%u\n", data->update_rate);
+       return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_rate(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
        if (err)
                return err;
 
-       /* find the nearest update rate from the table */
-       for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
-               if (val >= update_rates[i])
+       /*
+        * Find the nearest update interval from the table.
+        * Use it to determine the matching update rate.
+        */
+       for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+               if (val >= update_intervals[i])
                        break;
        }
-       /* if not found, we point to the last entry (lowest update rate) */
+       /* if not found, we point to the last entry (lowest update interval) */
 
        /* set the new update rate while preserving other settings */
        reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
        adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
 
        mutex_lock(&data->update_lock);
-       data->update_rate = update_rates[i];
+       data->update_interval = update_intervals[i];
        mutex_unlock(&data->update_lock);
 
        return count;
 }
 
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
-                  set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+                  set_update_interval);
 
 static struct attribute *adm1031_attributes[] = {
        &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
 
        &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
 
-       &dev_attr_update_rate.attr,
+       &dev_attr_update_interval.attr,
        &dev_attr_alarms.attr,
 
        NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
        mask = ADM1031_UPDATE_RATE_MASK;
        read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
        i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
-       data->update_rate = update_rates[i];
+       /* Save it as update interval */
+       data->update_interval = update_intervals[i];
 }
 
 static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
 
        mutex_lock(&data->update_lock);
 
-       next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+       next_update = data->last_updated
+         + msecs_to_jiffies(data->update_interval);
        if (time_after(jiffies, next_update) || !data->valid) {
 
                dev_dbg(&client->dev, "Starting adm1031 update\n");
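
The update_intervals[] lookup above relies on the table being sorted in descending order: the scan stops at the first entry less than or equal to the requested value and otherwise falls through to the last, smallest entry. In isolation:

#include <stddef.h>
#include <stdio.h>

static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
#define N (sizeof(update_intervals) / sizeof(update_intervals[0]))

static unsigned int pick_interval(unsigned long val)
{
        size_t i;

        for (i = 0; i < N - 1; i++)
                if (val >= update_intervals[i])
                        break;
        /* if nothing matched, i is N - 1: the smallest interval */
        return update_intervals[i];
}

int main(void)
{
        printf("%u %u %u\n",
               pick_interval(9000),        /* 8000 */
               pick_interval(1000),        /* 1000 */
               pick_interval(1));          /* 125 */
        return 0;
}
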
index de8111114f469ec21567a5781349bb0d7f7cbc98..a23b17a78ace8f42cb114b30f593b9019a24a982 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
+#include <asm/smp.h>
 
 #define DRVNAME        "coretemp"
 
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       /*
+        * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+        * sensors. We check this bit only; all the early CPUs
+        * without thermal sensors will be filtered out.
+        */
+       if (!cpu_has(c, X86_FEATURE_DTS)) {
+               printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+                      " has no thermal sensor.\n", c->x86_model);
+               return 0;
+       }
 
        mutex_lock(&pdev_list_mutex);
 
@@ -482,14 +492,22 @@ exit:
 
 static void coretemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
+       unsigned int i;
+
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
-               if (p->cpu == cpu) {
-                       platform_device_unregister(p->pdev);
-                       list_del(&p->list);
-                       kfree(p);
-               }
+       list_for_each_entry(p, &pdev_list, list) {
+               if (p->cpu != cpu)
+                       continue;
+
+               platform_device_unregister(p->pdev);
+               list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
+               kfree(p);
+               for_each_cpu(i, cpu_sibling_mask(cpu))
+                       if (i != cpu && !coretemp_device_add(i))
+                               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
        if (err)
                goto exit;
 
-       for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-               /*
-                * CPUID.06H.EAX[0] indicates whether the CPU has thermal
-                * sensors. We check this bit only, all the early CPUs
-                * without thermal sensors will be filtered out.
-                */
-               if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
-                       coretemp_device_add(i);
-               else {
-                       printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
-                               " has no thermal sensor.\n", c->x86_model);
-               }
-       }
+       for_each_online_cpu(i)
+               coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
                err = -ENODEV;
                goto exit_driver_unreg;
        }
+#endif
 
        register_hotcpu_notifier(&coretemp_cpu_notifier);
        return 0;
 
-exit_driver_unreg:
 #ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
        platform_driver_unregister(&coretemp_driver);
 #endif
 exit:
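
The coretemp removal path above switches from list_for_each_entry_safe() to a find-unlink-return shape: unlink under pdev_list_mutex, drop the lock before kfree() and before re-adding a sibling (which takes the same lock), then return rather than keep walking a list it no longer holds. A userspace analogy with a singly linked list (illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pdev_entry {
        struct pdev_entry *next;
        unsigned int cpu;
};

static pthread_mutex_t pdev_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct pdev_entry *pdev_list;

static void device_remove(unsigned int cpu)
{
        struct pdev_entry **pp, *p;

        pthread_mutex_lock(&pdev_list_mutex);
        for (pp = &pdev_list; (p = *pp); pp = &p->next) {
                if (p->cpu != cpu)
                        continue;

                *pp = p->next;             /* unlink under the lock */
                pthread_mutex_unlock(&pdev_list_mutex);
                free(p);                   /* free outside the lock */
                /* ... a sibling could be re-added here, safely
                 * retaking pdev_list_mutex ... */
                return;                    /* stop: list walk is over */
        }
        pthread_mutex_unlock(&pdev_list_mutex);
}

int main(void)
{
        struct pdev_entry *e = calloc(1, sizeof(*e));

        if (!e)
                return 1;
        e->cpu = 2;
        e->next = pdev_list;
        pdev_list = e;
        device_remove(2);
        printf("list %s\n", pdev_list ? "non-empty" : "empty");
        return 0;
}
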
index 5b58b20dead1fa92b79af5c05036d45de8bf88ca..8dee3f38fdfb27e1c80a9fb36add60e414aeb805 100644 (file)
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
        res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
        if (res) {
                dev_warn(&client->dev, "create group failed\n");
-               hwmon_device_unregister(data->hwmon_dev);
                goto thermal_error1;
        }
        data->hwmon_dev = hwmon_device_register(&client->dev);
index 537841ef44b99d179318f7510dbf28dddedb0ed8..75afb3b0e0763c184a1b22cdc163ef32d10d2969 100644 (file)
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
 /* Super-I/O Function prototypes */
 static inline int superio_inb(int base, int reg);
 static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
 static inline void superio_select(int base, int ld);
 static inline void superio_exit(int base);
 
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
        return val;
 }
 
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
 {
+       /* Don't step on other drivers' I/O space by accident */
+       if (!request_muxed_region(base, 2, DRVNAME)) {
+               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+                               base);
+               return -EBUSY;
+       }
+
        /* according to the datasheet the key must be send twice! */
        outb(SIO_UNLOCK_KEY, base);
        outb(SIO_UNLOCK_KEY, base);
+
+       return 0;
 }
 
 static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
 static inline void superio_exit(int base)
 {
        outb(SIO_LOCK_KEY, base);
+       release_region(base, 2);
 }
 
 static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
 static int __init f71882fg_find(int sioaddr, unsigned short *address,
        struct f71882fg_sio_data *sio_data)
 {
-       int err = -ENODEV;
        u16 devid;
-
-       /* Don't step on other drivers' I/O space by accident */
-       if (!request_region(sioaddr, 2, DRVNAME)) {
-               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
-                               (int)sioaddr);
-               return -EBUSY;
-       }
-
-       superio_enter(sioaddr);
+       int err = superio_enter(sioaddr);
+       if (err)
+               return err;
 
        devid = superio_inw(sioaddr, SIO_REG_MANID);
        if (devid != SIO_FINTEK_ID) {
                pr_debug(DRVNAME ": Not a Fintek device\n");
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        default:
                printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
                       (unsigned int)devid);
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
 
        if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
                printk(KERN_WARNING DRVNAME ": Device not activated\n");
+               err = -ENODEV;
                goto exit;
        }
 
        *address = superio_inw(sioaddr, SIO_REG_ADDR);
        if (*address == 0) {
                printk(KERN_WARNING DRVNAME ": Base address not set\n");
+               err = -ENODEV;
                goto exit;
        }
        *address &= ~(REGION_LENGTH - 1);       /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
                (int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
        superio_exit(sioaddr);
-       release_region(sioaddr, 2);
        return err;
 }
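
After the f71882fg change above, the I/O region claim lives inside superio_enter(), which can now fail and must be propagated, and superio_exit() releases what enter claimed, so every path between the two holds the region. A sketch of that pairing, with stand-in bookkeeping instead of request_muxed_region():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool region_claimed;

static int claim_region(void)
{
        if (region_claimed)
                return -EBUSY;
        region_claimed = true;
        return 0;
}

static int superio_enter(int base)
{
        int err = claim_region();          /* may fail: propagate it */

        if (err)
                return err;
        printf("unlocked SIO at 0x%x\n", base);
        return 0;
}

static void superio_exit(int base)
{
        printf("locked SIO at 0x%x\n", base);
        region_claimed = false;            /* release what enter claimed */
}

static int probe(int base)
{
        int err = superio_enter(base);

        if (err)
                return err;
        /* ... detect the chip; every exit path, success or error,
         * goes through superio_exit() ... */
        superio_exit(base);
        return 0;
}

int main(void)
{
        printf("probe -> %d\n", probe(0x2e));
        return 0;
}
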
 
index 0f58ecc5334d941cb4114a3681e2e5a44893280e..9638d58f99fdb0e56f15832f824dc6096d99d3ea 100644 (file)
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
 #define F75375_REG_PWM2_DROP_DUTY      0x6C
 
 #define FAN_CTRL_LINEAR(nr)            (4 + nr)
-#define FAN_CTRL_MODE(nr)              (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr)              (4 + ((nr) * 2))
 
 /*
  * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
                return -EINVAL;
 
        fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
-       fanmode = ~(3 << FAN_CTRL_MODE(nr));
+       fanmode &= ~(3 << FAN_CTRL_MODE(nr));
 
        switch (val) {
        case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        conf = f75375_read8(client, F75375_REG_CONFIG1);
-       conf = ~(1 << FAN_CTRL_LINEAR(nr));
+       conf &= ~(1 << FAN_CTRL_LINEAR(nr));
 
        if (val == 0)
                conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
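
The two one-character f75375s fixes above are the classic read-modify-write slip: reg = ~mask discards the value just read back, while reg &= ~mask clears only the targeted field. Demonstrated standalone with made-up register values:

#include <stdio.h>

#define FIELD_SHIFT(nr) (4 + ((nr) * 2))

int main(void)
{
        unsigned char reg = 0xB5;          /* pretend register readback */
        int nr = 1;

        unsigned char wrong = ~(3 << FIELD_SHIFT(nr));       /* loses 0xB5 */
        unsigned char right = reg & ~(3 << FIELD_SHIFT(nr)); /* keeps it */

        printf("wrong: 0x%02x  right: 0x%02x\n", wrong, right);
        return 0;
}
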
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index 6138f036b159956dbc4eec8282636db794485527..fc591ae53107da8481a2ab5e02f53117ac6e2471 100644 (file)
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
        wake_up_interruptible(&lis3_dev.misc_wait);
        kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
 out:
-       if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+       if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
            lis3_dev.idev->input->users)
                return IRQ_WAKE_THREAD;
        return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
         * io-apic is not configurable (and generates a warning) but I keep it
         * in case of support for other hardware.
         */
-       if (dev->whoami == WAI_8B)
+       if (dev->pdata && dev->whoami == WAI_8B)
                thread_fn = lis302dl_interrupt_thread1_8b;
        else
                thread_fn = NULL;
index dc1f5402c1d7ddcdaf4396fe177a689550b9ac2b..8e5933b72d1956c2a931ea7176b1c589e3066968 100644 (file)
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
 {
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweroff(lis3);
        return 0;
 }
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
 {
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweron(lis3);
        return 0;
 }
index 82b16808a274c1e0045692fcfc32dcfdb641ec31..b9be5e3a22b3825bc640d9c038c8c724e888c5b4 100644 (file)
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
 {
        struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweroff(&lis3_dev);
 
        return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
 {
        struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweron(lis3);
 
        return 0;
index 94741d42112da02ca902b2e87beacbe96aab3cd3..464340f25496402dd15a69e2d59281544e267c37 100644 (file)
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
 struct lm95241_data {
        struct device *hwmon_dev;
        struct mutex update_lock;
-       unsigned long last_updated, rate; /* in jiffies */
+       unsigned long last_updated, interval; /* in jiffies */
        char valid; /* zero until following fields are valid */
        /* registers values */
        u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
 show_temp(remote1);
 show_temp(remote2);
 
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct lm95241_data *data = lm95241_update_device(dev);
 
-       snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+       snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
        return strlen(buf);
 }
 
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm95241_data *data = i2c_get_clientdata(client);
 
-       strict_strtol(buf, 10, &data->rate);
-       data->rate = data->rate * HZ / 1000;
+       strict_strtol(buf, 10, &data->interval);
+       data->interval = data->interval * HZ / 1000;
 
        return count;
 }
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
 static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
 static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
 static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+                  set_interval);
 
 static struct attribute *lm95241_attributes[] = {
        &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
        &dev_attr_temp3_min.attr,
        &dev_attr_temp2_max.attr,
        &dev_attr_temp3_max.attr,
-       &dev_attr_rate.attr,
+       &dev_attr_update_interval.attr,
        NULL
 };
 
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
 {
        struct lm95241_data *data = i2c_get_clientdata(client);
 
-       data->rate = HZ;    /* 1 sec default */
+       data->interval = HZ;    /* 1 sec default */
        data->valid = 0;
        data->config = CFG_CR0076;
        data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
 
        mutex_lock(&data->update_lock);
 
-       if (time_after(jiffies, data->last_updated + data->rate) ||
+       if (time_after(jiffies, data->last_updated + data->interval) ||
            !data->valid) {
                dev_dbg(&client->dev, "Updating lm95241 data.\n");
                data->local_h =
index 74157fcda6edf4bc569469db4ae1de9efa5b882d..f11903936c8b3a51c3f8dddcb20318185952fa79 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/cpu.h>
-#include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
        err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
        if (err)
-               goto exit_free;
+               goto exit_dev;
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
 exit_class:
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
 exit_free:
        kfree(data);
 exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
 
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
        platform_set_drvdata(pdev, NULL);
        kfree(data);
        return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       if (!cpu_has(c, X86_FEATURE_PTS))
+               return 0;
 
        mutex_lock(&pdev_list_mutex);
 
@@ -339,17 +342,18 @@ exit:
 #ifdef CONFIG_HOTPLUG_CPU
 static void pkgtemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
        unsigned int i;
        int err;
 
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
+       list_for_each_entry(p, &pdev_list, list) {
                if (p->cpu != cpu)
                        continue;
 
                platform_device_unregister(p->pdev);
                list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
                kfree(p);
                for_each_cpu(i, cpu_core_mask(cpu)) {
                        if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
                                        break;
                        }
                }
-               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
                goto exit;
 
        for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-
-               if (!cpu_has(c, X86_FEATURE_PTS))
-                       continue;
-
                err = pkgtemp_device_add(i);
                if (err)
                        goto exit_devices_unreg;
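Editor's note: folding the X86_FEATURE_PTS test into pkgtemp_device_add() means every caller (the coldplug loop above and the CPU hotplug notifier) gets the same gate. A minimal sketch of the pattern, with the registration body elided:

        static int example_pkgtemp_device_add(unsigned int cpu)
        {
                struct cpuinfo_x86 *c = &cpu_data(cpu);

                /* No package thermal sensor: succeed without registering
                 * anything, so callers need no special casing. */
                if (!cpu_has(c, X86_FEATURE_PTS))
                        return 0;

                /* ... allocate the pdev_entry and register the platform
                 * device as in the original function ... */
                return 0;
        }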
index e96e69dd36fb4b4faba43ffad13b53815d06bc65..072c58008a633b713e1f68fd3a522572a41842d1 100644 (file)
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
 static inline void
 superio_exit(int ioreg)
 {
+       outb(0xaa, ioreg);
        outb(0x02, ioreg);
        outb(0x02, ioreg + 1);
 }
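Editor's note: for context, a hedged sketch of the usual Super-I/O lock/unlock pairing this hunk completes. On common Winbond/Fintek parts, 0xaa written to the index port is the "leave Extended Function mode" key, while the existing 0x02/0x02 write is kept for chips that expect that sequence instead:

        static inline void superio_enter_example(int ioreg)
        {
                outb(0x87, ioreg);      /* unlock key, written twice */
                outb(0x87, ioreg);
        }

        static inline void superio_exit_example(int ioreg)
        {
                outb(0xaa, ioreg);      /* lock again: leave config mode */
        }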
index f7bd2613ceccbc69e4d04b69432fdc40638b2555..f2de3be35df36265cdfca5d899097b161ef10171 100644 (file)
@@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
        dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
                cpm->adap.name);
 
+       /*
+        * register OF I2C devices
+        */
+       of_i2c_register_devices(&cpm->adap);
+
        return 0;
 out_shut:
        cpm_i2c_shutdown(cpm);
index 2222c87876b97bc711b6d739fd4a82deef7db330..5795c8398c7c3a25af82319a0ce1d46741def8b2 100644 (file)
@@ -331,21 +331,16 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
        INIT_COMPLETION(dev->cmd_complete);
        dev->cmd_err = 0;
 
-       /* Take I2C out of reset, configure it as master and set the
-        * start bit */
-       flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT;
+       /* Take I2C out of reset and configure it as master */
+       flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST;
 
        /* if the slave address is ten bit address, enable XA bit */
        if (msg->flags & I2C_M_TEN)
                flag |= DAVINCI_I2C_MDR_XA;
        if (!(msg->flags & I2C_M_RD))
                flag |= DAVINCI_I2C_MDR_TRX;
-       if (stop)
-               flag |= DAVINCI_I2C_MDR_STP;
-       if (msg->len == 0) {
+       if (msg->len == 0)
                flag |= DAVINCI_I2C_MDR_RM;
-               flag &= ~DAVINCI_I2C_MDR_STP;
-       }
 
        /* Enable receive or transmit interrupts */
        w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
@@ -357,7 +352,11 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 
        dev->terminate = 0;
 
-       /* write the data into mode register */
+       /*
+        * Write the mode register first, as needed for correct behaviour
+        * on OMAP-L138, but don't set STT yet to avoid a race with XRDY
+        * occurring before we have loaded DXR
+        */
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
 
        /*
@@ -365,12 +364,19 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
         * because transmit-data-ready interrupt can come before
         * NACK-interrupt during sending of previous message and
         * ICDXR may have wrong data
+        * It also saves us one interrupt, making the transfer slightly faster
         */
        if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) {
                davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
                dev->buf_len--;
        }
 
+       /* Set STT to begin transmit now DXR is loaded */
+       flag |= DAVINCI_I2C_MDR_STT;
+       if (stop && msg->len != 0)
+               flag |= DAVINCI_I2C_MDR_STP;
+       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+
        r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
                                                      dev->adapter.timeout);
        if (r == 0) {
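Editor's note: taken together, the hunks above turn the start of a transfer into a two-step MDR write. Consolidated for readability (names from the hunks, illustrative rather than a drop-in):

        /* 1) Program the mode register without STT/STP: nothing starts yet. */
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);

        /* 2) Preload the first byte so XRDY cannot fire with an empty DXR. */
        if (!(msg->flags & I2C_M_RD) && dev->buf_len) {
                davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
                dev->buf_len--;
        }

        /* 3) Only now set STT (and STP for the final message) to start it. */
        flag |= DAVINCI_I2C_MDR_STT;
        if (stop && msg->len != 0)
                flag |= DAVINCI_I2C_MDR_STP;
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);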
index 43ca32fddde2b77309c92533f9c2c50e44719879..89eedf45d30ed877e6abbc76c66507372c4a3f83 100644 (file)
@@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev,
        dev_info(&ofdev->dev, "using %s mode\n",
                 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
 
+       /* Now register all the child nodes */
+       of_i2c_register_devices(adap);
+
        return 0;
 
 error_cleanup:
index d1ff9408dc1f2d68cdbe305c6777ba259eed64e4..4c2a62b75b5cf188dd896dff100c80bfe253aded 100644 (file)
@@ -159,15 +159,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
 
 static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
 {
-       int result;
-
-       result = wait_event_interruptible_timeout(i2c_imx->queue,
-               i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+       wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
 
-       if (unlikely(result < 0)) {
-               dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__);
-               return result;
-       } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
+       if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
                dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
                return -ETIMEDOUT;
        }
@@ -295,7 +289,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
                i2c_imx->i2csr = temp;
                temp &= ~I2SR_IIF;
                writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
-               wake_up_interruptible(&i2c_imx->queue);
+               wake_up(&i2c_imx->queue);
                return IRQ_HANDLED;
        }
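Editor's note: the switch from the interruptible variant relies on wait_event_timeout() never returning -ERESTARTSYS: it returns 0 on timeout and a positive value when the condition became true, so a signal can no longer abort a transfer the hardware will complete anyway, and the only failure left is a genuine timeout. The resulting shape:

        /* Bounded, signal-immune wait for the transfer-complete flag. */
        wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
        if (!(i2c_imx->i2csr & I2SR_IIF))
                return -ETIMEDOUT;      /* flag still clear: no answer */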
 
index a1c419a716af8d24f4a84a680cc93123b6c8bff9..b74e6dc6886c71ed5ebe3e02f219a68f8f0a2df8 100644 (file)
@@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
                dev_err(i2c->dev, "failed to add adapter\n");
                goto fail_add;
        }
+       of_i2c_register_devices(&i2c->adap);
 
        return result;
 
index 0e9f85d0a835718dac97ecd52ff270f327d84136..56dbe54e88118a3fb7b112da16e11ccd5bdbc9fb 100644 (file)
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
                return result;
        } else if (result == 0) {
                dev_dbg(i2c->dev, "%s: timeout\n", __func__);
-               result = -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        return 0;
index 7674efb553786e4e2a639c5a5b263362564b626b..b33c78586bfccf815d9322df1d561b7bec5797b5 100644 (file)
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
        if (r == 0)
                r = num;
+
+       omap_i2c_wait_for_bb(dev);
 out:
        omap_i2c_idle(dev);
        return r;
index bbd77603a4173b8f29aa1353ef1e47376c594196..29933f87d8fa8fdc31ffdd38c4ff905fddd7774d 100644 (file)
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
 
 static int pca_isa_waitforcompletion(void *pd)
 {
-       long ret = ~0;
        unsigned long timeout;
+       long ret;
 
        if (irq > -1) {
                ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
        } else {
                /* Do polling */
                timeout = jiffies + pca_isa_ops.timeout;
-               while (((pca_isa_readbyte(pd, I2C_PCA_CON)
-                               & I2C_PCA_CON_SI) == 0)
-                               && (ret = time_before(jiffies, timeout)))
+               do {
+                       ret = time_before(jiffies, timeout);
+                       if (pca_isa_readbyte(pd, I2C_PCA_CON)
+                                       & I2C_PCA_CON_SI)
+                               break;
                        udelay(100);
+               } while (ret);
        }
+
        return ret > 0;
 }
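Editor's note: the same do/while rework appears in both PCA drivers below. It has two properties, sketched here with a hypothetical status accessor: ret is now always a clean 0/1 from time_before() rather than starting life as ~0, and the completion bit is polled at least once even if the deadline has already passed on entry:

        long ret;
        unsigned long timeout = jiffies + adap_timeout; /* adap_timeout: assumed */

        do {
                ret = time_before(jiffies, timeout);    /* 1 while inside budget */
                if (read_status() & I2C_PCA_CON_SI)     /* read_status(): hypothetical */
                        break;
                udelay(100);
        } while (ret);

        return ret > 0;         /* nonzero only if completion beat the deadline */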
 
index ef5c78487eb779c36fd5b98f3fd8d0727c34494e..5f6d7f89e2252d1a4806a3be9e6368c212ceed48 100644 (file)
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
 static int i2c_pca_pf_waitforcompletion(void *pd)
 {
        struct i2c_pca_pf_data *i2c = pd;
-       long ret = ~0;
        unsigned long timeout;
+       long ret;
 
        if (i2c->irq) {
                ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
        } else {
                /* Do polling */
                timeout = jiffies + i2c->adap.timeout;
-               while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
-                               & I2C_PCA_CON_SI) == 0)
-                               && (ret = time_before(jiffies, timeout)))
+               do {
+                       ret = time_before(jiffies, timeout);
+                       if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+                                       & I2C_PCA_CON_SI)
+                               break;
                        udelay(100);
+               } while (ret);
        }
 
        return ret > 0;
index 72902e0bbfa79a48caaf2193420d4b8712af1e18..bf831bf8158741a9f857eb541afc1f3a48d38e52 100644 (file)
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
                unsigned long sda_delay;
 
                if (pdata->sda_delay) {
-                       sda_delay = (freq / 1000) * pdata->sda_delay;
-                       sda_delay /= 1000000;
+                       sda_delay = clkin * pdata->sda_delay;
+                       sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
                        sda_delay = DIV_ROUND_UP(sda_delay, 5);
                        if (sda_delay > 3)
                                sda_delay = 3;
index 6649176de940572a317b2744bd1d4e393c4bbd04..bea4c5021d26cb5b4c92e9b58cabeda2b882023c 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/init.h>
 #include <linux/idr.h>
 #include <linux/mutex.h>
-#include <linux/of_i2c.h>
 #include <linux/of_device.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
@@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       if (pm)
-               return pm->suspend ? pm->suspend(dev) : 0;
+       if (pm) {
+               if (pm_runtime_suspended(dev))
+                       return 0;
+               else
+                       return pm->suspend ? pm->suspend(dev) : 0;
+       }
 
        return i2c_legacy_suspend(dev, PMSG_SUSPEND);
 }
@@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev)
        else
                ret = i2c_legacy_resume(dev);
 
-       if (!ret) {
-               pm_runtime_disable(dev);
-               pm_runtime_set_active(dev);
-               pm_runtime_enable(dev);
-       }
-
        return ret;
 }
 
@@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       if (pm)
-               return pm->freeze ? pm->freeze(dev) : 0;
+       if (pm) {
+               if (pm_runtime_suspended(dev))
+                       return 0;
+               else
+                       return pm->freeze ? pm->freeze(dev) : 0;
+       }
 
        return i2c_legacy_suspend(dev, PMSG_FREEZE);
 }
@@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       if (pm)
-               return pm->thaw ? pm->thaw(dev) : 0;
+       if (pm) {
+               if (pm_runtime_suspended(dev))
+                       return 0;
+               else
+                       return pm->thaw ? pm->thaw(dev) : 0;
+       }
 
        return i2c_legacy_resume(dev);
 }
@@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (pm_runtime_suspended(dev))
-               return 0;
-
-       if (pm)
-               return pm->poweroff ? pm->poweroff(dev) : 0;
+       if (pm) {
+               if (pm_runtime_suspended(dev))
+                       return 0;
+               else
+                       return pm->poweroff ? pm->poweroff(dev) : 0;
+       }
 
        return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
 }
@@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
        if (adap->nr < __i2c_first_dynamic_bus_num)
                i2c_scan_static_board_info(adap);
 
-       /* Register devices from the device tree */
-       of_i2c_register_devices(adap);
-
        /* Notify drivers */
        mutex_lock(&core_lock);
        bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
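Editor's note: all four sleep callbacks above are reshaped the same way. Collapsed into one hypothetical helper for clarity, the new control flow is:

        static int i2c_pm_op_example(struct device *dev,
                                     int (*cb)(struct device *),
                                     pm_message_t legacy_msg)
        {
                const struct dev_pm_ops *pm =
                        dev->driver ? dev->driver->pm : NULL;

                if (pm) {
                        /* The runtime-PM short-circuit now applies only to
                         * dev_pm_ops drivers; legacy drivers always get
                         * their legacy callback. */
                        if (pm_runtime_suspended(dev))
                                return 0;
                        return cb ? cb(dev) : 0;
                }

                return i2c_legacy_suspend(dev, legacy_msg);
        }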
index 4c3d1bfec0c5b450fbbe68014e1c15fe9e29c454..068cef0a987aa672566d986eb7353670d533fdd6 100644 (file)
@@ -1444,14 +1444,6 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                        ide_acpi_port_init_devices(hwif);
        }
 
-       ide_host_for_each_port(i, hwif, host) {
-               if (hwif == NULL)
-                       continue;
-
-               if (hwif->present)
-                       hwif_register_devices(hwif);
-       }
-
        ide_host_for_each_port(i, hwif, host) {
                if (hwif == NULL)
                        continue;
@@ -1459,8 +1451,10 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                ide_sysfs_register_port(hwif);
                ide_proc_register_port(hwif);
 
-               if (hwif->present)
+               if (hwif->present) {
                        ide_proc_port_register_devices(hwif);
+                       hwif_register_devices(hwif);
+               }
        }
 
        return j ? 0 : -1;
old mode 100755 (executable)
new mode 100644 (file)
index a10152b..cb3ccf3
 #include <linux/hrtimer.h>     /* ktime_get_real() */
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <asm/mwait.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
 
-#define MWAIT_SUBSTATE_MASK    (0xf)
-#define MWAIT_CSTATE_MASK      (0xf)
-#define MWAIT_SUBSTATE_SIZE    (4)
-#define MWAIT_MAX_NUM_CSTATES  8
-#define CPUID_MWAIT_LEAF (5)
-#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
-#define CPUID5_ECX_INTERRUPT_BREAK     (0x2)
-
 static struct cpuidle_driver intel_idle_driver = {
        .name = "intel_idle",
        .owner = THIS_MODULE,
@@ -83,7 +76,7 @@ static unsigned int mwait_substates;
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states;
 
-static struct cpuidle_device *intel_idle_cpuidle_devices;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +101,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C3",
                .desc = "MWAIT 0x10",
                .driver_data = (void *) 0x10,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 20,
                .power_usage = 500,
                .target_residency = 80,
@@ -117,7 +110,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C6",
                .desc = "MWAIT 0x20",
                .driver_data = (void *) 0x20,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
                .power_usage = 350,
                .target_residency = 800,
@@ -149,7 +142,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "ATM-C4",
                .desc = "MWAIT 0x30",
                .driver_data = (void *) 0x30,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 100,
                .power_usage = 250,
                .target_residency = 400,
@@ -157,13 +150,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
        { /* MWAIT C5 */ },
        { /* MWAIT C6 */
                .name = "ATM-C6",
-               .desc = "MWAIT 0x40",
-               .driver_data = (void *) 0x40,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
-               .exit_latency = 200,
+               .desc = "MWAIT 0x52",
+               .driver_data = (void *) 0x52,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 140,
                .power_usage = 150,
-               .target_residency = 800,
-               .enter = NULL },        /* disabled */
+               .target_residency = 560,
+               .enter = &intel_idle },
 };
 
 /**
@@ -185,6 +178,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 
        local_irq_disable();
 
+       /*
+        * If the state flag indicates that the TLB will be flushed or if this
+        * is the deepest C-state supported, voluntarily leave the active mm
+        * to avoid costly and mostly unnecessary wakeups for flushing the
+        * user TLBs associated with it.
+        */
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
+           (&dev->states[dev->state_count - 1] == state))
+               leave_mm(cpu);
+
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index d88077a219944ec49e2f594de780d0f216f9201f..13c88871dc3b90f564a52b4651aa371a4cd15633 100644 (file)
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
        skb->priority = CPL_PRIORITY_SETUP;
        set_arp_failure_handler(skb, act_open_req_arp_failure);
 
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
 
        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
index c908c5f83645c901f87823e2a587ba65d9b5ee97..af9ee313c10b67b9f0a034e6806757aca085c4ed 100644 (file)
@@ -28,7 +28,7 @@ struct evdev {
        int minor;
        struct input_handle handle;
        wait_queue_head_t wait;
-       struct evdev_client *grab;
+       struct evdev_client __rcu *grab;
        struct list_head client_list;
        spinlock_t client_lock; /* protects client_list */
        struct mutex mutex;
@@ -669,6 +669,9 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 
                if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
 
+                       if (!dev->absinfo)
+                               return -EINVAL;
+
                        t = _IOC_NR(cmd) & ABS_MAX;
                        abs = dev->absinfo[t];
 
@@ -680,10 +683,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                }
        }
 
-       if (_IOC_DIR(cmd) == _IOC_READ) {
+       if (_IOC_DIR(cmd) == _IOC_WRITE) {
 
                if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
 
+                       if (!dev->absinfo)
+                               return -EINVAL;
+
                        t = _IOC_NR(cmd) & ABS_MAX;
 
                        if (copy_from_user(&abs, p, min_t(size_t,
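Editor's note: the direction fix matters because EVIOCSABS is declared with _IOW() in input.h of this era, so its decoded direction is _IOC_WRITE (userspace writes the new parameters into the kernel) and the old test under the _IOC_READ branch could never match. Sketch of the decode:

        /* EVIOCSABS(abs) is _IOW('E', 0xc0 + abs, struct input_absinfo), so: */
        if (_IOC_DIR(cmd) == _IOC_WRITE &&
            (_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
                /* copy_from_user() the new struct input_absinfo, then apply it */
        }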
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
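Editor's note: initializing ABS_MT_TRACKING_ID to -1 matches the MT protocol's convention that -1 means "no contact in this slot". A driver reports the same state at run time roughly like this (sketch, not taken from this diff):

        input_mt_slot(dev, slot);                         /* select the slot */
        input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); /* contact lifted */
        input_sync(dev);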
index d85bd8a7967d2ee26aff8e5313c67a0cd7290532..22239e9884988139df3228a50db421f337818355 100644 (file)
@@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
 
        memcpy(joydev->abspam, abspam, len);
 
+       for (i = 0; i < joydev->nabs; i++)
+               joydev->absmap[joydev->abspam[i]] = i;
+
  out:
        kfree(abspam);
        return retval;
index c19066479057ff3567d7168c38922e0fd0b84da3..7e2c12a5b83933e23ce665783d2886b4ee6f211f 100644 (file)
@@ -104,7 +104,7 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm)
        t.endidx =              91;
        t.seq =                 tseq;
        t.act.semaphore =       &tsem;
-       init_MUTEX_LOCKED(&tsem);
+       sema_init(&tsem, 0);
        
        if (hp_sdc_enqueue_transaction(&t)) return -1;
        
@@ -698,7 +698,7 @@ static int __init hp_sdc_rtc_init(void)
                return -ENODEV;
 #endif
 
-       init_MUTEX(&i8042tregs);
+       sema_init(&i8042tregs, 1);
 
        if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
                return ret;
index 0d4266a533a524564adcc85cc546736248753628..360698553eb55d2d1f1b84f912dbc6984eb606bd 100644 (file)
@@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
                retval = uinput_validate_absbits(dev);
                if (retval < 0)
                        goto exit;
+               if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+                       int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+                       input_mt_create_slots(dev, nslot);
+                       input_set_events_per_packet(dev, 6 * nslot);
+               } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+                       input_set_events_per_packet(dev, 60);
+               }
        }
 
        udev->state = UIST_SETUP_COMPLETE;
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index c92f4edfee7beef3a326e0af4e43275093fba8dc..e5624d8f1709e6c576547c6e78ba66e766d39743 100644 (file)
@@ -915,15 +915,15 @@ int hil_mlc_register(hil_mlc *mlc)
        mlc->ostarted = 0;
 
        rwlock_init(&mlc->lock);
-       init_MUTEX(&mlc->osem);
+       sema_init(&mlc->osem, 1);
 
-       init_MUTEX(&mlc->isem);
+       sema_init(&mlc->isem, 1);
        mlc->icount = -1;
        mlc->imatch = 0;
 
        mlc->opercnt = 0;
 
-       init_MUTEX_LOCKED(&(mlc->csem));
+       sema_init(&(mlc->csem), 0);
 
        hil_mlc_clear_di_scratch(mlc);
        hil_mlc_clear_di_map(mlc, 0);
index bcc2d30ec245ef8349e81745958e0cf8112dfcbf..8c0b51c31424ce2e0c9b15a2a9eb467424e9b3ec 100644 (file)
@@ -905,7 +905,7 @@ static int __init hp_sdc_init(void)
        ts_sync[1]      = 0x0f;
        ts_sync[2] = ts_sync[3] = ts_sync[4] = ts_sync[5] = 0;
        t_sync.act.semaphore = &s_sync;
-       init_MUTEX_LOCKED(&s_sync);
+       sema_init(&s_sync, 0);
        hp_sdc_enqueue_transaction(&t_sync);
        down(&s_sync); /* Wait for t_sync to complete */
 
@@ -1039,7 +1039,7 @@ static int __init hp_sdc_register(void)
                return hp_sdc.dev_err;
        }
 
-       init_MUTEX_LOCKED(&tq_init_sem);
+       sema_init(&tq_init_sem, 0);
 
        tq_init.actidx          = 0;
        tq_init.idx             = 1;
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
index 42ba3691d908bc1fc8c370da5ada4287ceb21115..b35876ee6908c7328f29e2505fd29a94a61b2d61 100644 (file)
@@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb)
 static int wacom_open(struct input_dev *dev)
 {
        struct wacom *wacom = input_get_drvdata(dev);
+       int retval = 0;
 
-       mutex_lock(&wacom->lock);
-
-       wacom->irq->dev = wacom->usbdev;
-
-       if (usb_autopm_get_interface(wacom->intf) < 0) {
-               mutex_unlock(&wacom->lock);
+       if (usb_autopm_get_interface(wacom->intf) < 0)
                return -EIO;
-       }
+
+       mutex_lock(&wacom->lock);
 
        if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
-               usb_autopm_put_interface(wacom->intf);
-               mutex_unlock(&wacom->lock);
-               return -EIO;
+               retval = -EIO;
+               goto out;
        }
 
        wacom->open = true;
        wacom->intf->needs_remote_wakeup = 1;
 
+out:
        mutex_unlock(&wacom->lock);
-       return 0;
+       if (retval)
+               usb_autopm_put_interface(wacom->intf);
+       return retval;
 }
 
 static void wacom_close(struct input_dev *dev)
@@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev)
        wacom->open = false;
        wacom->intf->needs_remote_wakeup = 0;
        mutex_unlock(&wacom->lock);
+
+       usb_autopm_put_interface(wacom->intf);
 }
 
 static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
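Editor's note: the reordering in wacom_open() gives a clean hierarchy: take the autopm reference outside the mutex, and drop it only after the mutex is released on failure. Reduced to its shape (names from the hunk, body elided):

        int retval = 0;

        if (usb_autopm_get_interface(wacom->intf) < 0)  /* ref before lock */
                return -EIO;

        mutex_lock(&wacom->lock);
        if (usb_submit_urb(wacom->irq, GFP_KERNEL))
                retval = -EIO;
        else
                wacom->open = true;
        mutex_unlock(&wacom->lock);

        if (retval)
                usb_autopm_put_interface(wacom->intf);  /* ref dropped after unlock */
        return retval;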
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..47fd7a041c52e1898a8727c5386569fb3caed45c 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
@@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
        /* general pen packet */
        if ((data[1] & 0xb8) == 0xa0) {
                t = (data[6] << 2) | ((data[7] >> 6) & 3);
-               if (features->type >= INTUOS4S && features->type <= INTUOS4L)
+               if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+                   features->type == WACOM_21UX2) {
                        t = (t << 1) | (data[1] & 1);
+               }
                input_report_abs(input, ABS_PRESSURE, t);
                input_report_abs(input, ABS_TILT_X,
                                ((data[7] << 1) & 0x7e) | (data[8] >> 7));
index 485be8b1e1b33bef2bd7e3af8fab2066d3687ec3..f0225bc0f2670ce2f0fde9b5d276f26151c6a046 100644 (file)
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
                        }
                        else if(callid>=0x0000 && callid<=0x7FFF)
                        {
+                               int len;
+
                                pr_debug("%s: Got Incoming Call\n",
                                                sc_adapter[card]->devicename);
-                               strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
-                               strcpy(setup.eazmsn,
-                                       sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+                               len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+                                               sizeof(setup.phone));
+                               if (len >= sizeof(setup.phone))
+                                       continue;
+                               len = strlcpy(setup.eazmsn,
+                                               sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+                                               sizeof(setup.eazmsn));
+                               if (len >= sizeof(setup.eazmsn))
+                                       continue;
                                setup.si1 = 7;
                                setup.si2 = 0;
                                setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
                 * Handle a GetMyNumber Rsp
                 */
                if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
-                       strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+                       strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+                               rcvmsg.msg_data.byte_array,
+                               sizeof(rcvmsg.msg_data.byte_array));
                        continue;
                }
                        
index 74dce4ba0262560977ae88d69fbb2fbe7db62009..350eb34f049c97520bb183d97f9f84f8864663b5 100644 (file)
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
        int cmd_level;
        int slow_level;
 
-       read_lock(&led_dat->rw_lock);
+       read_lock_irq(&led_dat->rw_lock);
 
        cmd_level = gpio_get_value(led_dat->cmd);
        slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       read_unlock(&led_dat->rw_lock);
+       read_unlock_irq(&led_dat->rw_lock);
 
        return ret;
 }
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                             enum ns2_led_modes mode)
 {
        int i;
+       unsigned long flags;
 
-       write_lock(&led_dat->rw_lock);
+       write_lock_irqsave(&led_dat->rw_lock, flags);
 
        for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
                if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       write_unlock(&led_dat->rw_lock);
+       write_unlock_irqrestore(&led_dat->rw_lock, flags);
 }
 
 static void ns2_led_set(struct led_classdev *led_cdev,
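Editor's note: the lock-variant changes follow the standard rule that a lock reachable from contexts with interrupts disabled must not be taken with plain read_lock()/write_lock(). ns2_led_set_mode() may be called with interrupts already off, hence the _irqsave/_irqrestore pair; the reader runs only in process context, so _irq is enough. Pattern sketch with a hypothetical lock:

        static DEFINE_RWLOCK(example_lock);

        void writer_any_context(void)
        {
                unsigned long flags;

                write_lock_irqsave(&example_lock, flags);   /* saves irq state */
                /* ... update shared state ... */
                write_unlock_irqrestore(&example_lock, flags);
        }

        void reader_process_context(void)
        {
                read_lock_irq(&example_lock);   /* unconditionally disables irqs */
                /* ... read shared state ... */
                read_unlock_irq(&example_lock);
        }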
index 1c4ee6e77937f596378f88587787052106cd3c58..bf64e49d996acd121a215c7d0bdc3c1bc1fd86b6 100644 (file)
@@ -83,7 +83,7 @@ static struct adb_driver *adb_controller;
 BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
-static DECLARE_MUTEX(adb_probe_mutex);
+static DEFINE_SEMAPHORE(adb_probe_mutex);
 static int sleepy_trackpad;
 static int autopoll_devs;
 int __adb_probe_sync;
index ed4900ade93a4d80b84784e76aafc406689020b2..e4fb58db5454d4bfc5cc5257bb74435907b3b3f2 100644 (file)
@@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                page = bitmap->sb_page;
                                offset = sizeof(bitmap_super_t);
                                if (!file)
-                                       read_sb_page(bitmap->mddev,
-                                                    bitmap->mddev->bitmap_info.offset,
-                                                    page,
-                                                    index, count);
+                                       page = read_sb_page(
+                                               bitmap->mddev,
+                                               bitmap->mddev->bitmap_info.offset,
+                                               page,
+                                               index, count);
                        } else if (file) {
                                page = read_page(file, index, bitmap, count);
                                offset = 0;
index 43cf9cc9c1df3650c228ce01920645fb474f105a..f20d13e717d55e0a04de747f62384d2ce4b0b310 100644 (file)
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
                if (rdev->sb_size & bmask)
                        rdev->sb_size = (rdev->sb_size | bmask) + 1;
-       }
+       } else
+               max_dev = le32_to_cpu(sb->max_dev);
+
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
        if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return;
        if ( ! (
-               (mddev->flags && !mddev->external) ||
+               (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                (mddev->external == 0 && mddev->safemode == 1) ||
index ad83a4dcadc3ed7cafa914d2e4dcb7ef1a939fdf..0b830bbe1d8b6323bac02106c2ccc4d806e1e390 100644 (file)
@@ -1839,7 +1839,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                /* take from bio_init */
                bio->bi_next = NULL;
+               bio->bi_flags &= ~(BIO_POOL_MASK-1);
                bio->bi_flags |= 1 << BIO_UPTODATE;
+               bio->bi_comp_cpu = -1;
                bio->bi_rw = READ;
                bio->bi_vcnt = 0;
                bio->bi_idx = 0;
@@ -1912,7 +1914,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                                break;
                        BUG_ON(sync_blocks < (PAGE_SIZE>>9));
-                       if (len > (sync_blocks<<9))
+                       if ((len >> 9) > sync_blocks)
                                len = sync_blocks<<9;
                }
 
index 7e82a9df726b51ab6d90f00fc1b1fa22998c8862..7961d59f5cace91b18fc6a67fef4caea09f08265 100644 (file)
@@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie)
         * a keyup event might follow immediately after the keydown.
         */
        spin_lock_irqsave(&ir->keylock, flags);
-       if (time_is_after_eq_jiffies(ir->keyup_jiffies))
+       if (time_is_before_eq_jiffies(ir->keyup_jiffies))
                ir_keyup(ir);
        spin_unlock_irqrestore(&ir->keylock, flags);
 }
@@ -510,6 +510,13 @@ int __ir_input_register(struct input_dev *input_dev,
                   (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
                        " in raw mode" : "");
 
+       /*
+        * The default delay of 250ms is too short for some protocols, especially
+        * since the timeout is currently set to 250ms. Increase it to 500ms
+        * to avoid spurious repetition of keycodes.
+        */
+       input_dev->rep[REP_DELAY] = 500;
+
        return 0;
 
 out_event:
index 77b5946413c0203739d9bdcecb129e218f194356..e63f757d5d72ca0ba6c1b4e206914c7d7625112a 100644 (file)
@@ -267,7 +267,7 @@ static int ir_lirc_register(struct input_dev *input_dev)
                        features |= LIRC_CAN_SET_SEND_CARRIER;
 
                if (ir_dev->props->s_tx_duty_cycle)
-                       features |= LIRC_CAN_SET_REC_DUTY_CYCLE;
+                       features |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
        }
 
        if (ir_dev->props->s_rx_carrier_range)
index 43094e7eccfa92ba6213115ecdab06d88b1387ec..8e0e1b1f8c87ef9f83f05ab6a564e30ddd78aa4c 100644 (file)
@@ -279,9 +279,11 @@ int ir_raw_event_register(struct input_dev *input_dev)
                        "rc%u",  (unsigned int)ir->devno);
 
        if (IS_ERR(ir->raw->thread)) {
+               int ret = PTR_ERR(ir->raw->thread);
+
                kfree(ir->raw);
                ir->raw = NULL;
-               return PTR_ERR(ir->raw->thread);
+               return ret;
        }
 
        mutex_lock(&ir_raw_handler_lock);
index 96dafc425c8e61495cd662bf7f4c11182d674e79..46d42467f9b43010739895f165ed7bfe93793137 100644 (file)
@@ -67,13 +67,14 @@ static ssize_t show_protocols(struct device *d,
        char *tmp = buf;
        int i;
 
-       if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+       if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
                enabled = ir_dev->rc_tab.ir_type;
                allowed = ir_dev->props->allowed_protos;
-       } else {
+       } else if (ir_dev->raw) {
                enabled = ir_dev->raw->enabled_protocols;
                allowed = ir_raw_get_allowed_protocols();
-       }
+       } else
+               return sprintf(tmp, "[builtin]\n");
 
        IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
                   (long long)allowed,
@@ -121,10 +122,14 @@ static ssize_t store_protocols(struct device *d,
        int rc, i, count = 0;
        unsigned long flags;
 
-       if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+       if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
                type = ir_dev->rc_tab.ir_type;
-       else
+       else if (ir_dev->raw)
                type = ir_dev->raw->enabled_protocols;
+       else {
+               IR_dprintk(1, "Protocol switching not supported\n");
+               return -EINVAL;
+       }
 
        while ((tmp = strsep((char **) &data, " \n")) != NULL) {
                if (!*tmp)
@@ -185,7 +190,7 @@ static ssize_t store_protocols(struct device *d,
                }
        }
 
-       if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+       if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
                spin_lock_irqsave(&ir_dev->rc_tab.lock, flags);
                ir_dev->rc_tab.ir_type = type;
                spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
index 64264f7f838f29a0be7861f872f8e852bf376669..39557ad401b63fce5a892e5245fe7d6f42b83ee1 100644 (file)
@@ -19,6 +19,7 @@ static struct ir_scancode rc6_mce[] = {
 
        { 0x800f0416, KEY_PLAY },
        { 0x800f0418, KEY_PAUSE },
+       { 0x800f046e, KEY_PLAYPAUSE },
        { 0x800f0419, KEY_STOP },
        { 0x800f0417, KEY_RECORD },
 
@@ -37,6 +38,8 @@ static struct ir_scancode rc6_mce[] = {
        { 0x800f0411, KEY_VOLUMEDOWN },
        { 0x800f0412, KEY_CHANNELUP },
        { 0x800f0413, KEY_CHANNELDOWN },
+       { 0x800f043a, KEY_BRIGHTNESSUP },
+       { 0x800f0480, KEY_BRIGHTNESSDOWN },
 
        { 0x800f0401, KEY_NUMERIC_1 },
        { 0x800f0402, KEY_NUMERIC_2 },
index ac6bb2c01a4810446451d2651df53936b57d104c..bc620e10ef77e46149f57bdf278cc8bf79d150b3 100644 (file)
@@ -120,6 +120,10 @@ static struct usb_device_id mceusb_dev_table[] = {
        { USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
        /* Philips eHome Infrared Transceiver */
        { USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
+       /* Philips/Spinel plus IR transceiver for ASUS */
+       { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
+       /* Philips/Spinel plus IR transceiver for ASUS */
+       { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
        /* Realtek MCE IR Receiver */
        { USB_DEVICE(VENDOR_REALTEK, 0x0161) },
        /* SMK/Toshiba G83C0004D410 */
index fe818348b8a36450357f2b19571b000afdcd873f..48397f103d326264b261506539bc1c4cee9a343d 100644 (file)
@@ -673,9 +673,6 @@ static int dib0700_probe(struct usb_interface *intf,
                        else
                                dev->props.rc.core.bulk_mode = false;
 
-                       /* Need a higher delay, to avoid wrong repeat */
-                       dev->rc_input_dev->rep[REP_DELAY] = 500;
-
                        dib0700_rc_setup(dev);
 
                        return 0;
index f634d2e784b2ce24a005b0593372b505b903e1c3..e06acd1fecb61b9f3bf2676f0a16375f9101e907 100644 (file)
@@ -940,6 +940,58 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
        return adap->fe == NULL ? -ENODEV : 0;
 }
 
+/* STK7770P */
+static struct dib7000p_config dib7770p_dib7000p_config = {
+       .output_mpeg2_in_188_bytes = 1,
+
+       .agc_config_count = 1,
+       .agc = &dib7070_agc_config,
+       .bw  = &dib7070_bw_config_12_mhz,
+       .tuner_is_baseband = 1,
+       .spur_protect = 1,
+
+       .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+       .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+       .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+       .hostbus_diversity = 1,
+       .enable_current_mirror = 1,
+       .disable_sample_and_hold = 0,
+};
+
+static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
+{
+       struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
+       if (p->idVendor  == cpu_to_le16(USB_VID_PINNACLE) &&
+           p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
+               dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+       else
+               dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+       msleep(10);
+       dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+       dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+       dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+       dib0700_ctrl_clock(adap->dev, 72, 1);
+
+       msleep(10);
+       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+       msleep(10);
+       dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+       if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+                                    &dib7770p_dib7000p_config) != 0) {
+               err("%s: dib7000p_i2c_enumeration failed.  Cannot continue\n",
+                   __func__);
+               return -ENODEV;
+       }
+
+       adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+               &dib7770p_dib7000p_config);
+       return adap->fe == NULL ? -ENODEV : 0;
+}
+
 /* DIB807x generic */
 static struct dibx000_agc_config dib807x_agc_config[2] = {
        {
@@ -1781,7 +1833,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
 /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
        { USB_DEVICE(USB_VID_DIBCOM,    USB_PID_DIBCOM_STK807XPVR) },
        { USB_DEVICE(USB_VID_DIBCOM,    USB_PID_DIBCOM_STK807XP) },
-       { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
+       { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) },
        { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
 /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
        { USB_DEVICE(USB_VID_PINNACLE,  USB_PID_PINNACLE_PCTV282E) },
@@ -2406,7 +2458,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                                .pid_filter_count = 32,
                                .pid_filter       = stk70x0p_pid_filter,
                                .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
-                               .frontend_attach  = stk7070p_frontend_attach,
+                               .frontend_attach  = stk7770p_frontend_attach,
                                .tuner_attach     = dib7770p_tuner_attach,
 
                                DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
index 6b22ec64ab0cc69d7124bc16421309d3e9c070cd..f896337b453518e603419beedd5093a9a5f1fb16 100644 (file)
@@ -483,9 +483,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
                }
        }
        kfree(p);
-       if (fw) {
-               release_firmware(fw);
-       }
+       release_firmware(fw);
        return ret;
 }
 
index 2e28b973dfd3cbb1743ac48d3fb931c9d2600063..3aed0d43392152688bbe4176ebc8ee68712ab724 100644 (file)
@@ -260,6 +260,9 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
 
 //     dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
 
+       reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
+       reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
+
        dib7000p_write_word(state, 908, reg_908);
        dib7000p_write_word(state, 909, reg_909);
 }
@@ -778,7 +781,10 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
                default:
                case GUARD_INTERVAL_1_32: value *= 1; break;
        }
-       state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO
+       if (state->cfg.diversity_delay == 0)
+               state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo
+       else
+               state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo
 
        /* deactive the possibility of diversity reception if extended interleaver */
        state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
index 805dd13a97ee347d3b06e517e266d2bb1de9e4d3..da17345bf5bdd66002ea64080e2c4456bfde1ca3 100644 (file)
@@ -33,6 +33,11 @@ struct dib7000p_config {
        int (*agc_control) (struct dvb_frontend *, u8 before);
 
        u8 output_mode;
+       u8 disable_sample_and_hold : 1;
+
+       u8 enable_current_mirror : 1;
+       u8 diversity_delay;
+
 };
 
 #define DEFAULT_DIB7000P_I2C_ADDRESS 18
index d93468cd3a85e1a5a3d8eee586c7ec29b69d3051..ff3b0fa901b39f00e23250e20dda2502fbb42e04 100644 (file)
@@ -1098,33 +1098,26 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
  *
  * @return pointer to descriptor on success, NULL on error.
  */
-struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+
+struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
 {
        struct smscore_buffer_t *cb = NULL;
        unsigned long flags;
 
-       DEFINE_WAIT(wait);
-
        spin_lock_irqsave(&coredev->bufferslock, flags);
-
-       /* This function must return a valid buffer, since the buffer list is
-        * finite, we check that there is an available buffer, if not, we wait
-        * until such buffer become available.
-        */
-
-       prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
-       if (list_empty(&coredev->buffers)) {
-               spin_unlock_irqrestore(&coredev->bufferslock, flags);
-               schedule();
-               spin_lock_irqsave(&coredev->bufferslock, flags);
+       if (!list_empty(&coredev->buffers)) {
+               cb = (struct smscore_buffer_t *) coredev->buffers.next;
+               list_del(&cb->entry);
        }
+       spin_unlock_irqrestore(&coredev->bufferslock, flags);
+       return cb;
+}
 
-       finish_wait(&coredev->buffer_mng_waitq, &wait);
-
-       cb = (struct smscore_buffer_t *) coredev->buffers.next;
-       list_del(&cb->entry);
+struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+{
+       struct smscore_buffer_t *cb = NULL;
 
-       spin_unlock_irqrestore(&coredev->bufferslock, flags);
+       wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
 
        return cb;
 }
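
The smscore_getbuffer() rewrite above replaces a hand-rolled prepare_to_wait()/schedule()/finish_wait() sequence, which could return with the list still empty, with wait_event() re-evaluating a lock-protected list pop on every wakeup. A self-contained sketch of the pattern, assuming a generic buffer pool; the pool type and names are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct buf_pool {
        struct list_head free;          /* protected by @lock */
        spinlock_t lock;
        wait_queue_head_t waitq;
};

/* Lock-protected, non-blocking pop; NULL when the list is empty. */
static struct list_head *pool_try_get(struct buf_pool *p)
{
        struct list_head *e = NULL;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        if (!list_empty(&p->free)) {
                e = p->free.next;
                list_del(e);
        }
        spin_unlock_irqrestore(&p->lock, flags);
        return e;
}

/* Blocking get: wait_event() re-runs the condition on every wakeup,
 * so losing a race with another waiter just means sleeping again. */
static struct list_head *pool_get(struct buf_pool *p)
{
        struct list_head *e;

        wait_event(p->waitq, (e = pool_try_get(p)));
        return e;
}

The freeing path has to pair its list_add() with wake_up() on the same waitqueue, otherwise a waiter can sleep indefinitely.
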
index 67a4ec8768a6145ecfd6fb1d44095bfb51f29fdc..4ce541a5eb47558f5b26dc9bd14523e937c5cfaf 100644 (file)
@@ -395,7 +395,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
        radio->registers[POWERCFG] = POWERCFG_ENABLE;
        if (si470x_set_register(radio, POWERCFG) < 0) {
                retval = -EIO;
-               goto err_all;
+               goto err_video;
        }
        msleep(110);
 
index 755dd0ce65ff724f2fd479f502e314e0cabf236e..6f2b57384488b3bb8124eed072217ef8ecf3194b 100644 (file)
@@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video
 EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
 
index 6bdc0ef18119716dadc5facc2347f22fda5c6192..f2a4900014bc5c1dace49615b123cb86184ba814 100644 (file)
@@ -32,6 +32,7 @@
 #include <media/v4l2-chip-ident.h>
 
 #include <media/cx25840.h>
+#include "dvb-usb-ids.h"
 #include "xc5000.h"
 
 #include "cx231xx.h"
@@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = {
         .driver_info = CX231XX_BOARD_CNXT_RDE_250},
        {USB_DEVICE(0x0572, 0x58A1),
         .driver_info = CX231XX_BOARD_CNXT_RDU_250},
+       {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
+        .driver_info = CX231XX_BOARD_UNKNOWN},
        {},
 };
 
@@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
                     dev->board.name, dev->model);
 
        /* set the direction for GPIO pins */
-       cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
-       cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
-       cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
+       if (dev->board.tuner_gpio) {
+               cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
+               cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+               cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
 
-       /* request some modules if any required */
+               /* request some modules if any required */
 
-       /* reset the Tuner */
-       cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+               /* reset the Tuner */
+               cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+       }
 
        /* set the mode to Analog mode initially */
        cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
index 86ca8c2359dd8bb409cadd434dc951a17ff170a9..f5a3e74c3c7cc0e6e52ebec222b28604e7cf53ef 100644 (file)
@@ -1996,7 +1996,7 @@ static int cx25840_probe(struct i2c_client *client,
 
                state->volume = v4l2_ctrl_new_std(&state->hdl,
                        &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
-                       0, 65335, 65535 / 100, default_volume);
+                       0, 65535, 65535 / 100, default_volume);
                state->mute = v4l2_ctrl_new_std(&state->hdl,
                        &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
                        0, 1, 1, 0);
index 99dbae1175919befc55ae7e2e00a3a524f666053..0fa85cbefbb12ffbe1e5729581ce6544dbadcfce 100644 (file)
@@ -17,7 +17,7 @@ config VIDEO_CX88
 
 config VIDEO_CX88_ALSA
        tristate "Conexant 2388x DMA audio support"
-       depends on VIDEO_CX88 && SND && EXPERIMENTAL
+       depends on VIDEO_CX88 && SND
        select SND_PCM
        ---help---
          This is a video4linux driver for direct (DMA) audio on
index b9846106913eb4871f429924092dd27563f0f115..78abc1c1f9d52766704af26c0ec3769cd08f9198 100644 (file)
@@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
                usb_rcvintpipe(dev, ep->bEndpointAddress),
                buffer, buffer_len,
                int_irq, (void *)gspca_dev, interval);
+       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        gspca_dev->int_urb = urb;
        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret < 0) {
index 83a718f0f3f9841b8412c34967f41476040398e0..9052d5702556539fbf77dbd8cf1da66e5d87bdc4 100644 (file)
@@ -2357,8 +2357,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
                            (data[33] << 10);
                avg_lum >>= 9;
                atomic_set(&sd->avg_lum, avg_lum);
-               gspca_frame_add(gspca_dev, LAST_PACKET,
-                               data, len);
+               gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
                return;
        }
        if (gspca_dev->last_packet_type == LAST_PACKET) {
index be03a712731c3b61a249d607927fb852f0517881..f0316d02f09f6df1297a6f3e5e6ed4e0c1b8ff1f 100644 (file)
@@ -466,6 +466,8 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
                        struct fb_vblank vblank;
                        u32 trace;
 
+                       memset(&vblank, 0, sizeof(struct fb_vblank));
+
                        vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
                                        FB_VBLANK_HAVE_VSYNC;
                        trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
index 4525335f9bd416388484cbc5872ea0db3c3af768..a7210d981388e8c4724f524e3fd5c77bbd672dca 100644 (file)
@@ -239,7 +239,7 @@ static int device_process(struct m2mtest_ctx *ctx,
                return -EFAULT;
        }
 
-       if (in_buf->vb.size < out_buf->vb.size) {
+       if (in_buf->vb.size > out_buf->vb.size) {
                v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
                return -EINVAL;
        }
@@ -1014,6 +1014,7 @@ static int m2mtest_remove(struct platform_device *pdev)
        v4l2_m2m_release(dev->m2m_dev);
        del_timer_sync(&dev->timer);
        video_unregister_device(dev->vfd);
+       video_device_release(dev->vfd);
        v4l2_device_unregister(&dev->v4l2_dev);
        kfree(dev);
 
index 758a4db27d65651481eec16b970f755a9036f622..c71af4e0e517f61631b1cc936021104dbcd30f92 100644 (file)
@@ -447,6 +447,9 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
        dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
                __func__, rect.left, rect.top, rect.width, rect.height);
 
+       if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               return -EINVAL;
+
        ret = mt9m111_make_rect(client, &rect);
        if (!ret)
                mt9m111->rect = rect;
@@ -466,12 +469,14 @@ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
 
 static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
 {
+       if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               return -EINVAL;
+
        a->bounds.left                  = MT9M111_MIN_DARK_COLS;
        a->bounds.top                   = MT9M111_MIN_DARK_ROWS;
        a->bounds.width                 = MT9M111_MAX_WIDTH;
        a->bounds.height                = MT9M111_MAX_HEIGHT;
        a->defrect                      = a->bounds;
-       a->type                         = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        a->pixelaspect.numerator        = 1;
        a->pixelaspect.denominator      = 1;
 
@@ -487,6 +492,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
        mf->width       = mt9m111->rect.width;
        mf->height      = mt9m111->rect.height;
        mf->code        = mt9m111->fmt->code;
+       mf->colorspace  = mt9m111->fmt->colorspace;
        mf->field       = V4L2_FIELD_NONE;
 
        return 0;
index e7cd23cd63941ecb3380243f623960cf91c8ea31..b48473c7896b4b31d6d816ac544be70e6a1ac3c7 100644 (file)
@@ -402,9 +402,6 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
                if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
                        return -EINVAL;
                break;
-       case 0:
-               /* No format change, only geometry */
-               break;
        default:
                return -EINVAL;
        }
index 66ff174151b5f3d909022fbc096f8c432e6f65f4..b6ea67221d1d5fc64594348f49715539c19f9840 100644 (file)
@@ -378,6 +378,9 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
 
        spin_lock_irqsave(&pcdev->lock, flags);
 
+       if (*fb_active == NULL)
+               goto out;
+
        vb = &(*fb_active)->vb;
        dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
                vb, vb->baddr, vb->bsize);
@@ -402,6 +405,7 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
 
        *fb_active = buf;
 
+out:
        spin_unlock_irqrestore(&pcdev->lock, flags);
 }
 
index 1b992b847198a486bc0eb62342d3d1b020c99919..55ea914c7fcd3e0a21a17671ef68294831b4a567 100644 (file)
@@ -513,7 +513,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
                        if (ret >= 0) {
                                ret = pvr2_ctrl_range_check(cptr,*valptr);
                        }
-                       if (maskptr) *maskptr = ~0;
+                       *maskptr = ~0;
                } else if (cptr->info->type == pvr2_ctl_bool) {
                        ret = parse_token(ptr,len,valptr,boolNames,
                                          ARRAY_SIZE(boolNames));
@@ -522,7 +522,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
                        } else if (ret == 0) {
                                *valptr = (*valptr & 1) ? !0 : 0;
                        }
-                       if (maskptr) *maskptr = 1;
+                       *maskptr = 1;
                } else if (cptr->info->type == pvr2_ctl_enum) {
                        ret = parse_token(
                                ptr,len,valptr,
@@ -531,7 +531,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
                        if (ret >= 0) {
                                ret = pvr2_ctrl_range_check(cptr,*valptr);
                        }
-                       if (maskptr) *maskptr = ~0;
+                       *maskptr = ~0;
                } else if (cptr->info->type == pvr2_ctl_bitmask) {
                        ret = parse_tlist(
                                ptr,len,maskptr,valptr,
index b151c7be8a506b18ae1ff544de0ba604f7e05e8f..6961c55baf9b1140609dd470b4abc3e05eed86c3 100644 (file)
@@ -393,6 +393,37 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
        dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
 }
 
+static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+       struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+
+       f->dma_offset.y_h = f->offs_h;
+       if (!variant->pix_hoff)
+               f->dma_offset.y_h *= (f->fmt->depth >> 3);
+
+       f->dma_offset.y_v = f->offs_v;
+
+       f->dma_offset.cb_h = f->offs_h;
+       f->dma_offset.cb_v = f->offs_v;
+
+       f->dma_offset.cr_h = f->offs_h;
+       f->dma_offset.cr_v = f->offs_v;
+
+       if (!variant->pix_hoff) {
+               if (f->fmt->planes_cnt == 3) {
+                       f->dma_offset.cb_h >>= 1;
+                       f->dma_offset.cr_h >>= 1;
+               }
+               if (f->fmt->color == S5P_FIMC_YCBCR420) {
+                       f->dma_offset.cb_v >>= 1;
+                       f->dma_offset.cr_v >>= 1;
+               }
+       }
+
+       dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+           f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
 /**
  * fimc_prepare_config - check dimensions, operation and color mode
  *                      and pre-calculate offset and the scaling coefficients.
@@ -406,7 +437,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
 {
        struct fimc_frame *s_frame, *d_frame;
        struct fimc_vid_buffer *buf = NULL;
-       struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
        int ret = 0;
 
        s_frame = &ctx->s_frame;
@@ -419,61 +449,16 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
                        swap(d_frame->width, d_frame->height);
                }
 
-               /* Prepare the output offset ratios for scaler. */
-               d_frame->dma_offset.y_h = d_frame->offs_h;
-               if (!variant->pix_hoff)
-                       d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3);
-
-               d_frame->dma_offset.y_v = d_frame->offs_v;
-
-               d_frame->dma_offset.cb_h = d_frame->offs_h;
-               d_frame->dma_offset.cb_v = d_frame->offs_v;
-
-               d_frame->dma_offset.cr_h = d_frame->offs_h;
-               d_frame->dma_offset.cr_v = d_frame->offs_v;
+               /* Prepare the DMA offset ratios for scaler. */
+               fimc_prepare_dma_offset(ctx, &ctx->s_frame);
+               fimc_prepare_dma_offset(ctx, &ctx->d_frame);
 
-               if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) {
-                       d_frame->dma_offset.cb_h >>= 1;
-                       d_frame->dma_offset.cb_v >>= 1;
-                       d_frame->dma_offset.cr_h >>= 1;
-                       d_frame->dma_offset.cr_v >>= 1;
-               }
-
-               dbg("out offset: color= %d, y_h= %d, y_v= %d",
-                       d_frame->fmt->color,
-                       d_frame->dma_offset.y_h, d_frame->dma_offset.y_v);
-
-               /* Prepare the input offset ratios for scaler. */
-               s_frame->dma_offset.y_h = s_frame->offs_h;
-               if (!variant->pix_hoff)
-                       s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3);
-               s_frame->dma_offset.y_v = s_frame->offs_v;
-
-               s_frame->dma_offset.cb_h = s_frame->offs_h;
-               s_frame->dma_offset.cb_v = s_frame->offs_v;
-
-               s_frame->dma_offset.cr_h = s_frame->offs_h;
-               s_frame->dma_offset.cr_v = s_frame->offs_v;
-
-               if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) {
-                       s_frame->dma_offset.cb_h >>= 1;
-                       s_frame->dma_offset.cb_v >>= 1;
-                       s_frame->dma_offset.cr_h >>= 1;
-                       s_frame->dma_offset.cr_v >>= 1;
-               }
-
-               dbg("in offset: color= %d, y_h= %d, y_v= %d",
-                       s_frame->fmt->color, s_frame->dma_offset.y_h,
-                       s_frame->dma_offset.y_v);
-
-               fimc_set_yuv_order(ctx);
-
-               /* Check against the scaler ratio. */
                if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
                    s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
                        err("out of scaler range");
                        return -EINVAL;
                }
+               fimc_set_yuv_order(ctx);
        }
 
        /* Input DMA mode is not allowed when the scaler is disabled. */
@@ -822,7 +807,8 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
        } else {
                v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
                         "Wrong buffer/video queue type (%d)\n", f->type);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto s_fmt_out;
        }
 
        pix = &f->fmt.pix;
@@ -1414,8 +1400,10 @@ static int fimc_probe(struct platform_device *pdev)
        }
 
        fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
-       if (!fimc->work_queue)
+       if (!fimc->work_queue) {
+               ret = -ENOMEM;
                goto err_irq;
+       }
 
        ret = fimc_register_m2m_device(fimc);
        if (ret)
@@ -1492,6 +1480,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
 };
 
 static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
+       .pix_hoff       = 1,
        .has_inp_rot    = 1,
        .has_out_rot    = 1,
        .min_inp_pixsize = 16,
@@ -1506,6 +1495,7 @@ static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
 };
 
 static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+       .pix_hoff        = 1,
        .min_inp_pixsize = 16,
        .min_out_pixsize = 32,
 
index ec697fcd406ede6b23c9691ceb5d0fe1fc0134a4..bb8d83d8ddafbd79dd892e971e6d8152f5483a37 100644 (file)
@@ -4323,13 +4323,13 @@ struct saa7134_board saa7134_boards[] = {
        },
        [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
                /*       Beholder Intl. Ltd. 2008      */
-               /*Dmitry Belimov <d.belimov@gmail.com> */
-               .name           = "Beholder BeholdTV Columbus TVFM",
+               /* Dmitry Belimov <d.belimov@gmail.com> */
+               .name           = "Beholder BeholdTV Columbus TV/FM",
                .audio_clock    = 0x00187de7,
                .tuner_type     = TUNER_ALPS_TSBE5_PAL,
-               .radio_type     = UNSET,
-               .tuner_addr     = ADDR_UNSET,
-               .radio_addr     = ADDR_UNSET,
+               .radio_type     = TUNER_TEA5767,
+               .tuner_addr     = 0xc2 >> 1,
+               .radio_addr     = 0xc0 >> 1,
                .tda9887_conf   = TDA9887_PRESENT,
                .gpiomask       = 0x000A8004,
                .inputs         = {{
index 5713f3a4b76c952bf9b1db333bb547caf3e40376..ddd25d32723dc0436f2477a641afaa0d75d9f2d3 100644 (file)
@@ -136,10 +136,11 @@ ret:
 int saa7164_buffer_dealloc(struct saa7164_tsport *port,
        struct saa7164_buffer *buf)
 {
-       struct saa7164_dev *dev = port->dev;
+       struct saa7164_dev *dev;
 
-       if ((buf == 0) || (port == 0))
+       if (!buf || !port)
                return SAA_ERR_BAD_PARAMETER;
+       dev = port->dev;
 
        dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
 
index 8bdd940f32e689c5b51a94ec8975014567d9f00c..2ac85d8984f025cb0ec9933f87bf00e91bf656dd 100644 (file)
@@ -486,6 +486,12 @@ static int uvc_parse_format(struct uvc_device *dev,
                            max(frame->dwFrameInterval[0],
                                frame->dwDefaultFrameInterval));
 
+               if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
+                       frame->bFrameIntervalType = 1;
+                       frame->dwFrameInterval[0] =
+                               frame->dwDefaultFrameInterval;
+               }
+
                uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
                        frame->wWidth, frame->wHeight,
                        10000000/frame->dwDefaultFrameInterval,
@@ -2026,6 +2032,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceClass      = USB_CLASS_VENDOR_SPEC,
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0 },
+       /* Chicony CNF7129 (Asus EEE 100HE) */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x04f2,
+         .idProduct            = 0xb071,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_RESTRICT_FRAME_RATE },
        /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2091,6 +2106,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_MINMAX
                                | UVC_QUIRK_PROBE_DEF },
+       /* IMC Networks (Medion Akoya) */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x13d3,
+         .idProduct            = 0x5103,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_STREAM_NO_FID },
        /* Syntek (HP Spartan) */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
index bdacf3beabf54fcbe1f9f901692a0134e6b48ed1..892e0e51916c31853d9e8fa681ecce27e75edc53 100644 (file)
@@ -182,6 +182,7 @@ struct uvc_xu_control {
 #define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
 #define UVC_QUIRK_FIX_BANDWIDTH                0x00000080
 #define UVC_QUIRK_PROBE_DEF            0x00000100
+#define UVC_QUIRK_RESTRICT_FRAME_RATE  0x00000200
 
 /* Format flags */
 #define UVC_FMT_FLAG_COMPRESSED                0x00000001
index 073f01390cdd0a00de7e34dc7cd30360b72912c4..86294ed35c9b643cc7bab7411904476d0f7467a7 100644 (file)
@@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
 struct video_code32 {
        char            loadwhat[16];   /* name or tag of file being passed */
        compat_int_t    datasize;
-       unsigned char   *data;
+       compat_uptr_t   data;
 };
 
-static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
+static struct video_code __user *get_microcode32(struct video_code32 *kp)
 {
-       if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
-               copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
-               get_user(kp->datasize, &up->datasize) ||
-               copy_from_user(kp->data, up->data, up->datasize))
-                       return -EFAULT;
-       return 0;
+       struct video_code __user *up;
+
+       up = compat_alloc_user_space(sizeof(*up));
+
+       /*
+        * NOTE! We don't actually care if these fail. If the
+        * user address is invalid, the native ioctl will do
+        * the error handling for us
+        */
+       (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
+       (void) put_user(kp->datasize, &up->datasize);
+       (void) put_user(compat_ptr(kp->data), &up->data);
+       return up;
 }
 
 #define VIDIOCGTUNER32         _IOWR('v', 4, struct video_tuner32)
@@ -739,7 +746,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
                struct video_tuner vt;
                struct video_buffer vb;
                struct video_window vw;
-               struct video_code vc;
+               struct video_code32 vc;
                struct video_audio va;
 #endif
                struct v4l2_format v2f;
@@ -818,8 +825,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
                break;
 
        case VIDIOCSMICROCODE:
-               err = get_microcode32(&karg.vc, up);
-               compatible_arg = 0;
+               /* Copy the 32-bit "video_code32" to kernel space */
+               if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
+                       return -EFAULT;
+               /* Convert the 32-bit version to a 64-bit version in user space */
+               up = get_microcode32(&karg.vc);
                break;
 
        case VIDIOCSFREQ:
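
The VIDIOCSMICROCODE fix above swaps a kernel-space copy of the unvalidated 32-bit payload for the standard compat thunk: copy only the small 32-bit header into the kernel, rebuild a native structure in user space with compat_alloc_user_space(), and let the regular ioctl path do all pointer validation. A reduced sketch of that pattern with hypothetical foo/foo32 structures:

#include <linux/compat.h>
#include <linux/uaccess.h>

struct foo {                            /* native, 64-bit layout */
        int len;
        unsigned char __user *data;
};

struct foo32 {                          /* what 32-bit userspace passed */
        compat_int_t len;
        compat_uptr_t data;             /* 32-bit user pointer */
};

static struct foo __user *foo_compat_thunk(const struct foo32 *k32)
{
        struct foo __user *up = compat_alloc_user_space(sizeof(*up));

        /* Failures are deliberately ignored: if the user address is
         * bad, the native ioctl handler reports the fault. */
        (void) put_user(k32->len, &up->len);
        (void) put_user(compat_ptr(k32->data), &up->data);
        return up;
}
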
index 372b87efcd0538ec6c91c1bb45749177294f7442..6ff9e4bac3ea14fd6248bf07e1dfc6109fe43d27 100644 (file)
@@ -393,8 +393,10 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
        }
 
        /* read() method */
-       dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
-       mem->vaddr = NULL;
+       if (mem->vaddr) {
+               dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
+               mem->vaddr = NULL;
+       }
 }
 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
 
index 06f9a9c2a39add9256a58850d0cf5c4b0c52bf5b..2ad0bc252b0eaed1612ddaac477d21f6d7033b0c 100644 (file)
@@ -94,7 +94,7 @@ err:
  * must free the memory.
  */
 static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
-                                               int nr_pages, int offset)
+                                       int nr_pages, int offset, size_t size)
 {
        struct scatterlist *sglist;
        int i;
@@ -110,12 +110,14 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
                /* DMA to highmem pages might not work */
                goto highmem;
        sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
+       size -= PAGE_SIZE - offset;
        for (i = 1; i < nr_pages; i++) {
                if (NULL == pages[i])
                        goto nopage;
                if (PageHighMem(pages[i]))
                        goto highmem;
-               sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
+               sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
+               size -= min(PAGE_SIZE, size);
        }
        return sglist;
 
@@ -170,7 +172,8 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 
        first = (data          & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
-       dma->offset   = data & ~PAGE_MASK;
+       dma->offset = data & ~PAGE_MASK;
+       dma->size = size;
        dma->nr_pages = last-first+1;
        dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
        if (NULL == dma->pages)
@@ -252,7 +255,7 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
 
        if (dma->pages) {
                dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
-                                                  dma->offset);
+                                                  dma->offset, dma->size);
        }
        if (dma->vaddr) {
                dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
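
Passing the overall size into videobuf_pages_to_sg() lets the tail scatterlist entries be clamped to the bytes actually requested instead of a full PAGE_SIZE each. A sketch of the clamping logic, assuming the pages are already pinned; min_t() is used here purely to keep the types explicit:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *pages_to_sg(struct page **pages, int nr_pages,
                                       int offset, size_t size)
{
        struct scatterlist *sg;
        int i;

        sg = kcalloc(nr_pages, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return NULL;
        sg_init_table(sg, nr_pages);

        /* First entry: the remainder of the first page after @offset,
         * clamped to the bytes actually requested. */
        sg_set_page(&sg[0], pages[0],
                    min_t(size_t, PAGE_SIZE - offset, size), offset);
        size -= min_t(size_t, PAGE_SIZE - offset, size);

        /* Remaining entries: at most one page each, never more than
         * what is left of @size, so the tail entry is exact. */
        for (i = 1; i < nr_pages; i++) {
                sg_set_page(&sg[i], pages[i],
                            min_t(size_t, PAGE_SIZE, size), 0);
                size -= min_t(size_t, PAGE_SIZE, size);
        }
        return sg;
}
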
index 04028a9ee082735278557b606840619ade7e29a9..428377a5a6f56fe94ca033730a49102f19164dd2 100644 (file)
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
        irq_tsc = cache_tsc;
        for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
                irq_data = &max8925_irqs[i];
+               /* 1 -- disable, 0 -- enable */
                switch (irq_data->mask_reg) {
                case MAX8925_CHG_IRQ1_MASK:
-                       irq_chg[0] &= irq_data->enable;
+                       irq_chg[0] &= ~irq_data->enable;
                        break;
                case MAX8925_CHG_IRQ2_MASK:
-                       irq_chg[1] &= irq_data->enable;
+                       irq_chg[1] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ1_MASK:
-                       irq_on[0] &= irq_data->enable;
+                       irq_on[0] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ2_MASK:
-                       irq_on[1] &= irq_data->enable;
+                       irq_on[1] &= ~irq_data->enable;
                        break;
                case MAX8925_RTC_IRQ_MASK:
-                       irq_rtc &= irq_data->enable;
+                       irq_rtc &= ~irq_data->enable;
                        break;
                case MAX8925_TSC_IRQ_MASK:
-                       irq_tsc &= irq_data->enable;
+                       irq_tsc &= ~irq_data->enable;
                        break;
                default:
                        dev_err(chip->dev, "wrong IRQ\n");
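
The max8925 fix hinges on the mask-register polarity spelled out in the new comment: writing 1 disables a source, so enabling an interrupt means clearing its bit with &= ~enable; the old &= enable cleared every bit except the one being enabled. A tiny sketch of the convention with made-up source bits:

#include <linux/bitops.h>
#include <linux/types.h>

#define SRC_A  BIT(0)
#define SRC_B  BIT(1)

static u8 build_irq_mask(bool a_enabled, bool b_enabled)
{
        u8 mask = 0xff;                 /* all sources masked (disabled) */

        if (a_enabled)
                mask &= ~SRC_A;         /* clear the bit: 0 = enabled */
        if (b_enabled)
                mask &= ~SRC_B;
        return mask;
}
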
index 7dabe4dbd3732e1d75c396b9b1e01bdeafafa57c..294183b6260b1facff3d26764eb3cea8c6d4b011 100644 (file)
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
 
        irq = irq - wm831x->irq_base;
 
-       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
-               return -EINVAL;
+       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
+               /* Ignore internal-only IRQs */
+               if (irq >= 0 && irq < WM831X_NUM_IRQS)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
 
        switch (type) {
        case IRQ_TYPE_EDGE_BOTH:
index 0b591b658243a7675ac66d24b97e7ea1ceaa9781..b74331260744db8829a2a8712a0d08bf5d42f949 100644 (file)
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
          If unsure, say N.
 
          To compile this driver as a module, choose M here: the
-         module will be called vmware_balloon.
+         module will be called vmw_balloon.
 
 config ARM_CHARLCD
        bool "ARM Ltd. Character LCD Driver"
index 255a80dc9d73267a115da07c5729e04aa1c8a8ee..42eab95cde2af49e8ae737036695a1093caa01d9 100644 (file)
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-$(CONFIG_HMC6352)          += hmc6352.o
 obj-y                          += eeprom/
 obj-y                          += cb710/
-obj-$(CONFIG_VMWARE_BALLOON)   += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON)   += vmw_balloon.o
 obj-$(CONFIG_ARM_CHARLCD)      += arm-charlcd.o
index 714c6b487313a4ad7b360687591b75a6cddedc55..d5f3a3fd231931508948a9b0227aae2a3648a96f 100644 (file)
@@ -190,7 +190,6 @@ static int __devexit bh1780_remove(struct i2c_client *client)
 
        ddata = i2c_get_clientdata(client);
        sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-       i2c_set_clientdata(client, NULL);
        kfree(ddata);
 
        return 0;
index 5db49b124ffa158793e0cfb3d1db72321679d440..09eee6df0653c84fc5c08023f8913a6afbb884a7 100644 (file)
@@ -1631,6 +1631,19 @@ int mmc_suspend_host(struct mmc_host *host)
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend)
                        err = host->bus_ops->suspend(host);
+               if (err == -ENOSYS || !host->bus_ops->resume) {
+                       /*
+                        * We simply "remove" the card in this case.
+                        * It will be redetected on resume.
+                        */
+                       if (host->bus_ops->remove)
+                               host->bus_ops->remove(host);
+                       mmc_claim_host(host);
+                       mmc_detach_bus(host);
+                       mmc_release_host(host);
+                       host->pm_flags = 0;
+                       err = 0;
+               }
        }
        mmc_bus_put(host);
 
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 71ad4163b95e12b45aab7d41379c8022995d8ac4..aacb862ecc8a979f8022b8e0dd3428ed26c15baa 100644 (file)
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
 {
        struct sdhci_host *host = platform_get_drvdata(dev);
+       unsigned long flags;
+
        if (host) {
-               spin_lock(&host->lock);
+               spin_lock_irqsave(&host->lock, flags);
                if (state) {
                        dev_dbg(&dev->dev, "card inserted.\n");
                        host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
                        host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
                }
                tasklet_schedule(&host->card_tasklet);
-               spin_unlock(&host->lock);
+               spin_unlock_irqrestore(&host->lock, flags);
        }
 }
 
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
        sdhci_remove_host(host, 1);
 
        for (ptr = 0; ptr < 3; ptr++) {
-               clk_disable(sc->clk_bus[ptr]);
-               clk_put(sc->clk_bus[ptr]);
+               if (sc->clk_bus[ptr]) {
+                       clk_disable(sc->clk_bus[ptr]);
+                       clk_put(sc->clk_bus[ptr]);
+               }
        }
        clk_disable(sc->clk_io);
        clk_put(sc->clk_io);
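
sdhci_s3c_notify_change() shares host->lock with code running in interrupt context, so the plain spin_lock() could deadlock if the SDHCI interrupt arrived on the same CPU while the lock was held; spin_lock_irqsave() closes that window. A minimal sketch of the rule, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);
static int card_present;

/* Process (or unknown) context: disable local IRQs while holding a
 * lock that the interrupt handler also takes. */
static void set_card_state(int present)
{
        unsigned long flags;

        spin_lock_irqsave(&state_lock, flags);
        card_present = present;
        spin_unlock_irqrestore(&state_lock, flags);
}

/* Interrupt context: IRQs are already off on this CPU, so the plain
 * variant is safe here. */
static void card_irq_update(void)
{
        spin_lock(&state_lock);
        card_present = 1;
        spin_unlock(&state_lock);
}
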
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
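
Reworking the tmio_mmc kmap helpers to pass the mapped virtual address around fixes the unmap side: kunmap_atomic() must be given the pointer kmap_atomic() returned, not a fresh sg_page() lookup. A sketch of a balanced pair using the same era-specific two-argument kunmap_atomic(); the copy helper itself is hypothetical:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void copy_from_sg_entry(struct scatterlist *sg, void *dst,
                               unsigned int off, unsigned int len)
{
        unsigned long flags;
        void *virt;

        local_irq_save(flags);
        virt = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
        memcpy(dst, virt + off, len);
        /* Hand back the pointer kmap_atomic() returned, not a fresh
         * sg_page() lookup; the old helper got this wrong. */
        kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(flags);
}
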
 
index a382e3dd0a5dc8cdcddc2fc5f0b993c3badadd33..6fbeefa3a7662fb5fb85e8bfdbb86e0c63307ab3 100644 (file)
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
 static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
 {
        struct bf5xx_nand_info *info = to_nand_info(pdev);
-       struct mtd_info *mtd = NULL;
 
        platform_set_drvdata(pdev, NULL);
 
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
         * and their partitions, then go through freeing the
         * resources used
         */
-       mtd = &info->mtd;
-       if (mtd) {
-               nand_release(mtd);
-               kfree(mtd);
-       }
+       nand_release(&info->mtd);
 
        peripheral_free_list(bfin_nfc_pin_req);
        bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
        struct nand_chip *chip = mtd->priv;
        int ret;
 
-       ret = nand_scan_ident(mtd, 1);
+       ret = nand_scan_ident(mtd, 1, NULL);
        if (ret)
                return ret;
 
index fcf8ceb277d44cd3e78b5c5e0249ec1b40d7ab33..214b03afdd482920adda308e51088092d884a2d1 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
 
 #include <asm/mach/flash.h>
 #include <mach/mxc_nand.h>
@@ -67,7 +69,9 @@
 #define NFC_V1_V2_CONFIG1_BIG          (1 << 5)
 #define NFC_V1_V2_CONFIG1_RST          (1 << 6)
 #define NFC_V1_V2_CONFIG1_CE           (1 << 7)
-#define NFC_V1_V2_CONFIG1_ONE_CYCLE    (1 << 8)
+#define NFC_V2_CONFIG1_ONE_CYCLE       (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x)          (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT          (1 << 11)
 
 #define NFC_V1_V2_CONFIG2_INT          (1 << 15)
 
@@ -149,7 +153,7 @@ struct mxc_nand_host {
        int                     irq;
        int                     eccsize;
 
-       wait_queue_head_t       irq_waitq;
+       struct completion       op_completion;
 
        uint8_t                 *data_buf;
        unsigned int            buf_start;
@@ -162,6 +166,7 @@ struct mxc_nand_host {
        void                    (*send_read_id)(struct mxc_nand_host *);
        uint16_t                (*get_dev_status)(struct mxc_nand_host *);
        int                     (*check_int)(struct mxc_nand_host *);
+       void                    (*irq_control)(struct mxc_nand_host *, int);
 };
 
 /* OOB placement block for use with hardware ecc generation */
@@ -214,9 +219,12 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
 {
        struct mxc_nand_host *host = dev_id;
 
-       disable_irq_nosync(irq);
+       if (!host->check_int(host))
+               return IRQ_NONE;
 
-       wake_up(&host->irq_waitq);
+       host->irq_control(host, 0);
+
+       complete(&host->op_completion);
 
        return IRQ_HANDLED;
 }
@@ -243,11 +251,54 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
        if (!(tmp & NFC_V1_V2_CONFIG2_INT))
                return 0;
 
-       writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+       if (!cpu_is_mx21())
+               writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
 
        return 1;
 }
 
+/*
+ * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
+ * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
+ * driver can enable/disable the irq line rather than simply masking the
+ * interrupts.
+ */
+static void irq_control_mx21(struct mxc_nand_host *host, int activate)
+{
+       if (activate)
+               enable_irq(host->irq);
+       else
+               disable_irq_nosync(host->irq);
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+       uint16_t tmp;
+
+       tmp = readw(NFC_V1_V2_CONFIG1);
+
+       if (activate)
+               tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+       else
+               tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+       writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+       uint32_t tmp;
+
+       tmp = readl(NFC_V3_CONFIG2);
+
+       if (activate)
+               tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+       else
+               tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+       writel(tmp, NFC_V3_CONFIG2);
+}
+
 /* This function polls the NANDFC to wait for the basic operation to
  * complete by checking the INT bit of config2 register.
  */
@@ -257,10 +308,9 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
 
        if (useirq) {
                if (!host->check_int(host)) {
-
-                       enable_irq(host->irq);
-
-                       wait_event(host->irq_waitq, host->check_int(host));
+                       INIT_COMPLETION(host->op_completion);
+                       host->irq_control(host, 1);
+                       wait_for_completion(&host->op_completion);
                }
        } else {
                while (max_retries-- > 0) {
@@ -402,16 +452,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
+       memcpy(host->data_buf, host->main_area0, 16);
+
        if (this->options & NAND_BUSWIDTH_16) {
-               void __iomem *main_buf = host->main_area0;
                /* compress the ID info */
-               writeb(readb(main_buf + 2), main_buf + 1);
-               writeb(readb(main_buf + 4), main_buf + 2);
-               writeb(readb(main_buf + 6), main_buf + 3);
-               writeb(readb(main_buf + 8), main_buf + 4);
-               writeb(readb(main_buf + 10), main_buf + 5);
+               host->data_buf[1] = host->data_buf[2];
+               host->data_buf[2] = host->data_buf[4];
+               host->data_buf[3] = host->data_buf[6];
+               host->data_buf[4] = host->data_buf[8];
+               host->data_buf[5] = host->data_buf[10];
        }
-       memcpy(host->data_buf, host->main_area0, 16);
 }
 
 static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +779,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
-       uint16_t tmp;
+       uint16_t config1 = 0;
 
-       /* enable interrupt, disable spare enable */
-       tmp = readw(NFC_V1_V2_CONFIG1);
-       tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
-       tmp &= ~NFC_V1_V2_CONFIG1_SP_EN;
-       if (nand_chip->ecc.mode == NAND_ECC_HW) {
-               tmp |= NFC_V1_V2_CONFIG1_ECC_EN;
-       } else {
-               tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN;
-       }
+       if (nand_chip->ecc.mode == NAND_ECC_HW)
+               config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+       if (nfc_is_v21())
+               config1 |= NFC_V2_CONFIG1_FP_INT;
+
+       if (!cpu_is_mx21())
+               config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
 
        if (nfc_is_v21() && mtd->writesize) {
+               uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
                host->eccsize = get_eccsize(mtd);
                if (host->eccsize == 4)
-                       tmp |= NFC_V2_CONFIG1_ECC_MODE_4;
+                       config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+               config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
        } else {
                host->eccsize = 1;
        }
 
-       writew(tmp, NFC_V1_V2_CONFIG1);
+       writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
        /* Unlock the internal RAM Buffer */
@@ -794,6 +847,7 @@ static void preset_v3(struct mtd_info *mtd)
                NFC_V3_CONFIG2_2CMD_PHASES |
                NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
                NFC_V3_CONFIG2_ST_CMD(0x70) |
+               NFC_V3_CONFIG2_INT_MSK |
                NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
 
        if (chip->ecc.mode == NAND_ECC_HW)
@@ -1019,6 +1073,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                host->send_read_id = send_read_id_v1_v2;
                host->get_dev_status = get_dev_status_v1_v2;
                host->check_int = check_int_v1_v2;
+               if (cpu_is_mx21())
+                       host->irq_control = irq_control_mx21;
+               else
+                       host->irq_control = irq_control_v1_v2;
        }
 
        if (nfc_is_v21()) {
@@ -1057,6 +1115,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                host->send_read_id = send_read_id_v3;
                host->check_int = check_int_v3;
                host->get_dev_status = get_dev_status_v3;
+               host->irq_control = irq_control_v3;
                oob_smallpage = &nandv2_hw_eccoob_smallpage;
                oob_largepage = &nandv2_hw_eccoob_largepage;
        } else
@@ -1088,14 +1147,34 @@ static int __init mxcnd_probe(struct platform_device *pdev)
                this->options |= NAND_USE_FLASH_BBT;
        }
 
-       init_waitqueue_head(&host->irq_waitq);
+       init_completion(&host->op_completion);
 
        host->irq = platform_get_irq(pdev, 0);
 
+       /*
+        * mask the interrupt. For i.MX21, explicitly call
+        * irq_control_v1_v2 to use the mask bit. We can't call
+        * disable_irq_nosync() for an interrupt we do not own yet.
+        */
+       if (cpu_is_mx21())
+               irq_control_v1_v2(host, 0);
+       else
+               host->irq_control(host, 0);
+
        err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
        if (err)
                goto eirq;
 
+       host->irq_control(host, 0);
+
+       /*
+        * Now that the interrupt is disabled, make sure the interrupt
+        * mask bit is cleared on i.MX21. Otherwise we can't read
+        * the interrupt status bit on this machine.
+        */
+       if (cpu_is_mx21())
+               irq_control_v1_v2(host, 1);
+
        /* first scan to find the device and get the page size */
        if (nand_scan_ident(mtd, 1, NULL)) {
                err = -ENXIO;
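
The mxc_nand conversion trades a bare waitqueue for a struct completion plus a per-variant irq_control() hook, so the interrupt is unmasked only for the window in which wait_op_done() actually sleeps. A condensed sketch of the handshake; the ctrl structure and its hooks are illustrative stand-ins, and INIT_COMPLETION() is the pre-3.13 spelling this code base uses:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct ctrl {
        struct completion op_done;
        int (*check_int)(struct ctrl *);         /* reads the status bit */
        void (*irq_control)(struct ctrl *, int); /* 1 = unmask, 0 = mask */
};

static irqreturn_t ctrl_irq(int irq, void *dev_id)
{
        struct ctrl *c = dev_id;

        if (!c->check_int(c))
                return IRQ_NONE;        /* spurious or shared interrupt */

        c->irq_control(c, 0);           /* mask until the next command */
        complete(&c->op_done);
        return IRQ_HANDLED;
}

static void ctrl_wait_op_done(struct ctrl *c)
{
        if (c->check_int(c))
                return;                 /* already finished, no sleep */

        INIT_COMPLETION(c->op_done);    /* reset the completion counter */
        c->irq_control(c, 1);           /* unmask only while we sleep */
        wait_for_completion(&c->op_done);
}
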
index 133d51528f8dc0fb79eae4d12230e1a65bd7595e..513e0a76a4a73866d52bba8151e43556a3b30a54 100644 (file)
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
        } while (prefetch_status);
        /* disable and stop the PFPW engine */
-       gpmc_prefetch_reset();
+       gpmc_prefetch_reset(info->gpmc_cs);
 
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
index 4d89f37802075a26e602cf4ba6f42de54d07cfc8..4d01cda6884463daa844c02fde951970854e972a 100644 (file)
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
                goto fail_free_irq;
        }
 
+#ifdef CONFIG_MTD_PARTITIONS
        if (mtd_has_cmdlinepart()) {
                static const char *probes[] = { "cmdlinepart", NULL };
                struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        }
 
        return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+#else
+       return 0;
+#endif
 
 fail_free_irq:
        free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
        platform_set_drvdata(pdev, NULL);
 
        del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
        del_mtd_partitions(mtd);
+#endif
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0)
                free_irq(irq, info);
index cb443af3d45feee407bb79e8440f439cbdba7bd1..a460f1b748c20fbcb29982925820b79e6a7bbd78 100644 (file)
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
 
        do {
                status = readl(base + S5PC110_DMA_TRANS_STATUS);
+               if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+                       writel(S5PC110_DMA_TRANS_CMD_TEC,
+                                       base + S5PC110_DMA_TRANS_CMD);
+                       return -EIO;
+               }
        } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
 
-       if (status & S5PC110_DMA_TRANS_STATUS_TE) {
-               writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
-               writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
-               return -EIO;
-       }
-
        writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
 
        return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
                unsigned char *buffer, int offset, size_t count)
 {
        struct onenand_chip *this = mtd->priv;
-       void __iomem *bufferram;
        void __iomem *p;
        void *buf = (void *) buffer;
        dma_addr_t dma_src, dma_dst;
        int err;
 
-       p = bufferram = this->base + area;
+       p = this->base + area;
        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
 normal:
        if (count != mtd->writesize) {
                /* Copy the bufferram to memory to prevent unaligned access */
-               memcpy(this->page_buf, bufferram, mtd->writesize);
+               memcpy(this->page_buf, p, mtd->writesize);
                p = this->page_buf + offset;
        }
 
index 70705d1306b93e161260852d39f6adf6fe308226..eca55c52bdfdf2ae8c8445bfac2901cfd9d99bd3 100644 (file)
@@ -522,7 +522,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
        lp->tx_len              = lp->exec_box->data[9];   /* Transmit list count */
        lp->rx_len              = lp->exec_box->data[11];  /* Receive list count */
 
-       init_MUTEX_LOCKED(&lp->cmd_mutex);
+       sema_init(&lp->cmd_mutex, 0);
        init_completion(&lp->execution_cmd);
        init_completion(&lp->xceiver_cmd);
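
init_MUTEX_LOCKED() was being removed from the kernel; the drop-in replacement is sema_init() with a count of 0, meaning the semaphore starts owned and the first down() blocks until a matching up(). A one-function sketch:

#include <linux/semaphore.h>

static struct semaphore cmd_sem;

static void cmd_sem_setup(void)
{
        /* Count 0: the semaphore starts owned, so the first down()
         * blocks until a matching up() releases it. */
        sema_init(&cmd_sem, 0);
}
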
 
index a045559c81cf09e37bce9dbef9c56f1325523269..179871d9e71f9a1f111645f58c68b7fea55b0861 100644 (file)
@@ -635,6 +635,9 @@ struct vortex_private {
                must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
                large_frames:1,                 /* accept large frames */
                handling_irq:1;                 /* private in_irq indicator */
+       /* {get|set}_wol operations are already serialized by rtnl.
+        * No additional locking is required for enable_wol or acpi_set_WOL().
+        */
        int drv_flags;
        u16 status_enable;
        u16 intr_enable;
@@ -1994,10 +1997,9 @@ vortex_error(struct net_device *dev, int status)
                }
        }
 
-       if (status & RxEarly) {                         /* Rx early is unused. */
-               vortex_rx(dev);
+       if (status & RxEarly)                           /* Rx early is unused. */
                iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
-       }
+
        if (status & StatsFull) {                       /* Empty statistics. */
                static int DoneDidThat;
                if (vortex_debug > 4)
@@ -2298,7 +2300,12 @@ vortex_interrupt(int irq, void *dev_id)
                if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
                        if (status == 0xffff)
                                break;
+                       if (status & RxEarly)
+                               vortex_rx(dev);
+                       spin_unlock(&vp->window_lock);
                        vortex_error(dev, status);
+                       spin_lock(&vp->window_lock);
+                       window_set(vp, 7);
                }
 
                if (--work_done < 0) {
@@ -2935,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct vortex_private *vp = netdev_priv(dev);
 
-       spin_lock_irq(&vp->lock);
+       if (!VORTEX_PCI(vp))
+               return;
+
        wol->supported = WAKE_MAGIC;
 
        wol->wolopts = 0;
        if (vp->enable_wol)
                wol->wolopts |= WAKE_MAGIC;
-       spin_unlock_irq(&vp->lock);
 }
 
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct vortex_private *vp = netdev_priv(dev);
+
+       if (!VORTEX_PCI(vp))
+               return -EOPNOTSUPP;
+
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
 
-       spin_lock_irq(&vp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                vp->enable_wol = 1;
        else
                vp->enable_wol = 0;
        acpi_set_WOL(dev);
-       spin_unlock_irq(&vp->lock);
 
        return 0;
 }
@@ -3198,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
                        return;
                }
 
+               if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+                       return;
+
                /* Change the power state to D3; RxEnable doesn't take effect. */
                pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
        }
index 2cc81a54cbf322a49ccbf474f5d41f654faf109d..5db667c0b3711f235dfc49c52a4d165b12e6b3fd 100644 (file)
@@ -2428,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on MV64X60 || PPC32 || PLAT_ORION
+       depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
        select INET_LRO
        select PHYLIB
        help
@@ -2803,7 +2803,7 @@ config NIU
 
 config PASEMI_MAC
        tristate "PA Semi 1/10Gbit MAC"
-       depends on PPC_PASEMI && PCI
+       depends on PPC_PASEMI && PCI && INET
        select PHYLIB
        select INET_LRO
        help
index 63b9ba0cc67e13c408686442f1e2fe984ee9b462..c73be2848319deecd38a3d6f5e8e45ed8637e6f9 100644 (file)
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
 
        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;
+
+       adapter->cmb.dma = 0;
+       adapter->cmb.cmb = NULL;
+
+       adapter->smb.dma = 0;
+       adapter->smb.smb = NULL;
 }
 
 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
        atl1_reset_hw(&adapter->hw);
-       adapter->cmb.cmb->int_stats = 0;
 
-       if (netif_running(netdev))
+       if (netif_running(netdev)) {
+               adapter->cmb.cmb->int_stats = 0;
                atl1_up(adapter);
+       }
        netif_device_attach(netdev);
 
        return 0;
index 37617abc164769aa5f8be38a797ea4337a3585e0..efeffdf9e5fab30d2bd97a07e5e866fec60ed595 100644 (file)
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
+       if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
+               bp->istat &= ~ISTAT_RFO;
+               b44_disable_ints(bp);
+               ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+               b44_init_rings(bp);
+               b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+               netif_wake_queue(bp->dev);
+       }
+
        spin_unlock_irqrestore(&bp->lock, flags);
 
        work_done = 0;
@@ -2161,8 +2170,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
        dev->irq = sdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
 
-       netif_carrier_off(dev);
-
        err = ssb_bus_powerup(sdev->bus, 0);
        if (err) {
                dev_err(sdev->dev,
@@ -2204,6 +2211,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
                goto err_out_powerdown;
        }
 
+       netif_carrier_off(dev);
+
        ssb_set_drvdata(sdev, dev);
 
        /* Chip reset provides power to the b44 MAC & PCI cores, which
index 99197bd54da558ef26cf8a62a54af19aaa02e0f8..53306bf3f401bee193fc89f5c6c4d1b35759ecb0 100644 (file)
@@ -181,6 +181,7 @@ struct be_drvr_stats {
        u64 be_rx_bytes_prev;
        u64 be_rx_pkts;
        u32 be_rx_rate;
+       u32 be_rx_mcast_pkt;
        /* number of non ether type II frames dropped where
         * frame len > length field of Mac Hdr */
        u32 be_802_3_dropped_frames;
index 3d305494a6066fb987510abebe34a1332f80c114..34abcc9403d6b76428416412904b4c06ff8d593b 100644 (file)
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
-                       BUG_ON(!is_link_state_evt(compl->flags));
-
-                       /* Interpret compl as a async link evt */
-                       be_async_link_state_process(adapter,
+                       if (is_link_state_evt(compl->flags))
+                               be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
-                       be_dump_ue(adapter);
+                       be_detect_dump_ue(adapter);
                        return -1;
                }
 
index bdc10a28cfda9feb11ffdecee141a6e333a9e92d..ad1e6fac60c58869e074609cee3e363672bfecd9 100644 (file)
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
 
index cd16243c7c364a858d849ac3f70ac10bbbfcb966..13f0abbc520550b0b22ef48d0ed2a4da56d69187 100644 (file)
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(be_rx_events)},
        {DRVSTAT_INFO(be_tx_compl)},
        {DRVSTAT_INFO(be_rx_compl)},
+       {DRVSTAT_INFO(be_rx_mcast_pkt)},
        {DRVSTAT_INFO(be_ethrx_post_fail)},
        {DRVSTAT_INFO(be_802_3_dropped_frames)},
        {DRVSTAT_INFO(be_802_3_malformed_frames)},
index 5d38046402b235d255b529bb96c3cd07806fe3a9..a2ec5df0d73340bf82e45ab3d50f25402fcaa9b6 100644 (file)
 #define FLASH_FCoE_BIOS_START_g3           (13631488)
 #define FLASH_REDBOOT_START_g3             (262144)
 
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET              0
+#define BE_MULTICAST_PACKET            1
+#define BE_BROADCAST_PACKET            2
+#define BE_RSVD_PACKET                 3
 
 /*
  * BE descriptors: host memory data structures whose formats
index 74e146f470c60e9df5ff01806623a0aaaaa0ec82..6eda7a02225623943a35293cada545b8d20d752b 100644 (file)
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+       dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
 
        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
        /* no space available in linux */
        dev_stats->tx_dropped = 0;
 
-       dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;
 
        /* detailed tx_errors */
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
 }
 
 static void be_rx_stats_update(struct be_adapter *adapter,
-               u32 pktsize, u16 numfrags)
+               u32 pktsize, u16 numfrags, u8 pkt_type)
 {
        struct be_drvr_stats *stats = drvr_stats(adapter);
 
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;
+
+       if (pkt_type == BE_MULTICAST_PACKET)
+               stats->be_rx_mcast_pkt++;
 }
 
 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
+       u8 pkt_type;
 
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        page_info = get_rx_page_info(adapter, rxq_idx);
 
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        BUG_ON(j > MAX_SKB_FRAGS);
 
 done:
-       be_rx_stats_update(adapter, pktsize, num_rcvd);
+       be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
 }
 
 /* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
+       u8 pkt_type;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }
 
-       be_rx_stats_update(adapter, pkt_size, num_rcvd);
+       be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
 }
 
 static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        return 1;
 }
 
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
-       u32 online0 = 0, online1 = 0;
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
-       if (!online0 || !online1) {
-               adapter->ue_detected = true;
-               dev_err(&adapter->pdev->dev,
-                       "UE Detected!! online0=%d online1=%d\n",
-                       online0, online1);
-               return true;
-       }
-
-       return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
 {
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter)
        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
 
+       if (ue_status_lo || ue_status_hi) {
+               adapter->ue_detected = true;
+               dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+       }
+
        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work)
                adapter->rx_post_starved = false;
                be_post_rx_frags(adapter);
        }
-       if (!adapter->ue_detected) {
-               if (be_detect_ue(adapter))
-                       be_dump_ue(adapter);
-       }
+       if (!adapter->ue_detected)
+               be_detect_dump_ue(adapter);
 
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
index 822f586d72afa67d8abd5c606f38b11f60f0147d..0ddf4c66afe21aa99679a3fb31b1e932dc51612c 100644 (file)
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
        if (!(dev->flags & IFF_MASTER))
                goto out;
 
+       if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+               goto out;
+
        read_lock(&bond->lock);
        slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
                                        orig_dev);
index c746b331771d38f38771c89aa492ede20cbdf711..26bb118c45334074ef4c8a51deda39e4161fba44 100644 (file)
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
                goto out;
        }
 
+       if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+               goto out;
+
        if (skb->len < sizeof(struct arp_pkt)) {
                pr_debug("Packet is too small to be an ARP\n");
                goto out;
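
Both insertions above apply the same hardening pattern: before a handler casts skb->data to a protocol header (the LACPDU in bond_3ad, the ARP packet in bond_alb), pskb_may_pull() guarantees that many bytes are actually present in the skb's linear area, pulling them there if needed. A minimal sketch of the pattern, with an illustrative header struct and function name rather than the bonding ones:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    struct demo_hdr {                  /* stand-in for lacpdu / arp_pkt */
            u8     version;
            u8     type;
            __be16 len;
    };

    static int demo_recv(struct sk_buff *skb)
    {
            const struct demo_hdr *hdr;

            /* Fail fast unless sizeof(*hdr) bytes are linear; a runt or
             * heavily fragmented frame would otherwise let the cast
             * below read past the end of the linear data area. */
            if (!pskb_may_pull(skb, sizeof(*hdr)))
                    return -EINVAL;

            hdr = (const struct demo_hdr *)skb->data;
            return hdr->type;          /* safe to dereference now */
    }
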
index 2cc4cfc31892cd85458dec20b6c46401fdb90d9b..e953c6ad6e6d1ea3fd7e22fddc8f5f27ba1f8b38 100644 (file)
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         *       so it can wait
         */
        bond_for_each_slave(bond, slave, i) {
+               unsigned long trans_start = dev_trans_start(slave->dev);
+
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
-                           time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + delta_in_ticks) &&
+                           time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_UP;
                                slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         * when the source ip is 0, so don't take the link down
                         * if we don't know our ip yet
                         */
-                       if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
-                           (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+                       if (!time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + 2 * delta_in_ticks) ||
+                           !time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + 2 * delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i, commit = 0;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
-                                          delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               slave_last_rx(bond, slave) - delta_in_ticks,
+                               slave_last_rx(bond, slave) + delta_in_ticks)) {
+
                                slave->new_link = BOND_LINK_UP;
                                commit++;
                        }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * active.  This avoids bouncing, as the last receive
                 * times need a full ARP monitor cycle to be updated.
                 */
-               if (!time_after_eq(jiffies, slave->jiffies +
-                                  2 * delta_in_ticks))
+               if (time_in_range(jiffies,
+                                 slave->jiffies - delta_in_ticks,
+                                 slave->jiffies + 2 * delta_in_ticks))
                        continue;
 
                /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (slave->state == BOND_STATE_BACKUP &&
                    !bond->current_arp_slave &&
-                   time_after(jiffies, slave_last_rx(bond, slave) +
-                              3 * delta_in_ticks)) {
+                   !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * - (more than 2*delta since receive AND
                 *    the bond has an IP address)
                 */
+               trans_start = dev_trans_start(slave->dev);
                if ((slave->state == BOND_STATE_ACTIVE) &&
-                   (time_after_eq(jiffies, dev_trans_start(slave->dev) +
-                                   2 * delta_in_ticks) ||
-                     (time_after_eq(jiffies, slave_last_rx(bond, slave)
-                                    + 2 * delta_in_ticks)))) {
+                   (!time_in_range(jiffies,
+                       trans_start - delta_in_ticks,
+                       trans_start + 2 * delta_in_ticks) ||
+                    !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                        continue;
 
                case BOND_LINK_UP:
+                       trans_start = dev_trans_start(slave->dev);
                        if ((!bond->curr_active_slave &&
-                            time_before_eq(jiffies,
-                                           dev_trans_start(slave->dev) +
-                                           delta_in_ticks)) ||
+                            time_in_range(jiffies,
+                                          trans_start - delta_in_ticks,
+                                          trans_start + delta_in_ticks)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
                                bond->current_arp_slave = NULL;
@@ -5142,6 +5164,15 @@ int bond_create(struct net *net, const char *name)
                res = dev_alloc_name(bond_dev, "bond%d");
                if (res < 0)
                        goto out;
+       } else {
+               /*
+                * If we're given a name to register,
+                * we need to ensure that it's not already
+                * registered.
+                */
+               res = -EEXIST;
+               if (__dev_get_by_name(net, name) != NULL)
+                       goto out;
        }
 
        res = register_netdevice(bond_dev);
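
The arp_mon conversions above all trade one-sided time_before_eq()/time_after_eq() tests for time_in_range(). The point is jiffies wraparound: trans_start and last_rx may be arbitrarily stale, and once the counter wraps, a test like "jiffies <= stamp + delta" can spuriously succeed again and flap the slave's link state. Bounding the window from below as well keeps the comparison meaningful for stale stamps. A hedged sketch of the idiom (the helper is illustrative, not bonding code):

    #include <linux/jiffies.h>

    /* True iff now lies within [stamp - delta, stamp + delta] in
     * wrap-safe jiffies arithmetic; an ancient stamp that a one-sided
     * time_before_eq() test would falsely match is rejected here. */
    static bool demo_stamp_recent(unsigned long stamp, unsigned long delta)
    {
            return time_in_range(jiffies, stamp - delta, stamp + delta);
    }
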
index ad19585d960be79c0ec350c312fce978afaf2a9b..f208712c0b90d6b675f1ef1179f5825ae9f9f911 100644 (file)
@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;
 
+               memset(&edata, 0, sizeof(struct ch_reg));
+
                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
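
The added memset() closes a standard information leak: a struct assembled on the kernel stack can contain padding bytes and fields the ioctl handler never writes, and copy_to_user() would otherwise ship those stale stack contents to user space. The eql driver receives the identical fix further down. The generic shape, with a hypothetical struct and handler:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct demo_cfg {
            u32 cmd;
            u16 val;
            /* 2 bytes of tail padding here would leak if not zeroed */
    };

    static int demo_get_cfg(struct demo_cfg __user *uarg, u16 val)
    {
            struct demo_cfg cfg;

            memset(&cfg, 0, sizeof(cfg));  /* zero padding and unset fields */
            cfg.cmd = 1;
            cfg.val = val;
            return copy_to_user(uarg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
    }
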
index 66ed08f726fb9bcdd13225086d05f3dcf437da6a..ba302a5c2c30e14c2be16333f38adb2a2668a89c 100644 (file)
@@ -57,6 +57,7 @@ enum e1e_registers {
        E1000_SCTL     = 0x00024, /* SerDes Control - RW */
        E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
        E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
+       E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
        E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
        E1000_FCT      = 0x00030, /* Flow Control Type - RW */
        E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
index 63930d12711cf44ceb3334e5334fef5089ecb2e2..57b5435599ab1d61c58e393d04e0a2f3912d66fc 100644 (file)
 #define E1000_FEXTNVM_SW_CONFIG                1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
 
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
+
 #define PCIE_ICH8_SNOOP_ALL            PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES          7
 
 /* SMBus Address Phy Register */
 #define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK       0x007F
 #define HV_SMB_ADDR_PEC_EN     0x0200
 #define HV_SMB_ADDR_VALID      0x0080
 
@@ -237,6 +242,8 @@ static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
-       u32 ctrl;
+       u32 ctrl, fwsm;
        s32 ret_val = 0;
 
        phy->addr                     = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
         * disabled, then toggle the LANPHYPC Value bit to force
         * the interconnect to PCIe mode.
         */
-       if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+       fwsm = er32(FWSM);
+       if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                ctrl = er32(CTRL);
                ctrl |=  E1000_CTRL_LANPHYPC_OVERRIDE;
                ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
                ew32(CTRL, ctrl);
                msleep(50);
+
+               /*
+                * Gate automatic PHY configuration by hardware on
+                * non-managed 82579
+                */
+               if (hw->mac.type == e1000_pch2lan)
+                       e1000_gate_hw_phy_config_ich8lan(hw, true);
        }
 
        /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
        if (ret_val)
                goto out;
 
+       /* Ungate automatic PHY configuration on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan)  &&
+           !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+               msleep(10);
+               e1000_gate_hw_phy_config_ich8lan(hw, false);
+       }
+
        phy->id = e1000_phy_unknown;
        ret_val = e1000e_get_phy_id(hw);
        if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
        if (mac->type == e1000_ich8lan)
                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
-       /* Disable PHY configuration by hardware, config by software */
-       if (mac->type == e1000_pch2lan) {
-               u32 extcnf_ctrl = er32(EXTCNF_CTRL);
-
-               extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
-               ew32(EXTCNF_CTRL, extcnf_ctrl);
-       }
+       /* Gate automatic PHY configuration by hardware on managed 82579 */
+       if ((mac->type == e1000_pch2lan) &&
+           (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+               e1000_gate_hw_phy_config_ich8lan(hw, true);
 
        return 0;
 }
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        goto out;
        }
 
+       if (hw->mac.type == e1000_pch2lan) {
+               ret_val = e1000_k1_workaround_lv(hw);
+               if (ret_val)
+                       goto out;
+       }
+
        /*
         * Check if there was DownShift, must be checked
         * immediately after link-up
@@ -894,6 +919,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
 }
 
+/**
+ *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ *  @hw: pointer to the HW structure
+ *
+ *  Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+       u16 phy_data;
+       u32 strap = er32(STRAP);
+       s32 ret_val = 0;
+
+       strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+       ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data &= ~HV_SMB_ADDR_MASK;
+       phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+       phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+       ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+       return ret_val;
+}
+
 /**
  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
  *  @hw:   pointer to the HW structure
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 {
-       struct e1000_adapter *adapter = hw->adapter;
        struct e1000_phy_info *phy = &hw->phy;
        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
        s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                if (phy->type != e1000_phy_igp_3)
                        return ret_val;
 
-               if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+               if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+                   (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
                        break;
                }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
-       if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
-           ((hw->mac.type == e1000_pchlan) ||
-            (hw->mac.type == e1000_pch2lan))) {
+       if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+           (hw->mac.type == e1000_pchlan)) ||
+            (hw->mac.type == e1000_pch2lan)) {
                /*
                 * HW configures the SMBus address and LEDs when the
                 * OEM and LCD Write Enable bits are set in the NVM.
                 * When both NVM bits are cleared, SW will configure
                 * them instead.
                 */
-               data = er32(STRAP);
-               data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
-               reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
-               reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
-               ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
-                                                       reg_data);
+               ret_val = e1000_write_smbus_addr(hw);
                if (ret_val)
                        goto out;
 
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
 
                /* Enable jumbo frame workaround in the PHY */
-               e1e_rphy(hw, PHY_REG(769, 20), &data);
-               ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
-               if (ret_val)
-                       goto out;
                e1e_rphy(hw, PHY_REG(769, 23), &data);
                data &= ~(0x7F << 5);
                data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
                data &= ~(1 << 13);
-               data |= (1 << 12);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
                        goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 
                mac_reg = er32(RCTL);
                mac_reg &= ~E1000_RCTL_SECRC;
-               ew32(FFLT_DBG, mac_reg);
+               ew32(RCTL, mac_reg);
 
                ret_val = e1000e_read_kmrn_reg(hw,
                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        goto out;
 
                /* Write PHY register values back to h/w defaults */
-               e1e_rphy(hw, PHY_REG(769, 20), &data);
-               ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
-               if (ret_val)
-                       goto out;
                e1e_rphy(hw, PHY_REG(769, 23), &data);
                data &= ~(0x7F << 5);
                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
                if (ret_val)
                        goto out;
                e1e_rphy(hw, PHY_REG(769, 16), &data);
-               data &= ~(1 << 12);
                data |= (1 << 13);
                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
                if (ret_val)
@@ -1558,6 +1596,69 @@ out:
        return ret_val;
 }
 
+/**
+ *  e1000_k1_workaround_lv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *
+ *  Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u16 status_reg = 0;
+       u32 mac_reg;
+
+       if (hw->mac.type != e1000_pch2lan)
+               goto out;
+
+       /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+       ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+       if (ret_val)
+               goto out;
+
+       if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+           == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+               mac_reg = er32(FEXTNVM4);
+               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+               if (status_reg & HV_M_STATUS_SPEED_1000)
+                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+               else
+                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+               ew32(FEXTNVM4, mac_reg);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ *  @hw:   pointer to the HW structure
+ *  @gate: boolean set to true to gate, false to ungate
+ *
+ *  Gate/ungate the automatic PHY configuration via hardware; perform
+ *  the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+       u32 extcnf_ctrl;
+
+       if (hw->mac.type != e1000_pch2lan)
+               return;
+
+       extcnf_ctrl = er32(EXTCNF_CTRL);
+
+       if (gate)
+               extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+       else
+               extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+       ew32(EXTCNF_CTRL, extcnf_ctrl);
+       return;
+}
+
 /**
  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
  *  @hw: pointer to the HW structure
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
        if (e1000_check_reset_block(hw))
                goto out;
 
+       /* Allow time for h/w to get to a quiescent state after reset */
+       msleep(10);
+
        /* Perform any necessary post-reset workarounds */
        switch (hw->mac.type) {
        case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
        /* Configure the LCD with the OEM bits in NVM */
        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
+       /* Ungate automatic PHY configuration on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan) &&
+           !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+               msleep(10);
+               e1000_gate_hw_phy_config_ich8lan(hw, false);
+       }
+
 out:
        return ret_val;
 }
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
+       /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+       if ((hw->mac.type == e1000_pch2lan) &&
+           !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+               e1000_gate_hw_phy_config_ich8lan(hw, true);
+
        ret_val = e1000e_phy_hw_reset_generic(hw);
        if (ret_val)
                goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
                 * external PHY is reset.
                 */
                ctrl |= E1000_CTRL_PHY_RST;
+
+               /*
+                * Gate automatic PHY configuration by hardware on
+                * non-managed 82579
+                */
+               if ((hw->mac.type == e1000_pch2lan) &&
+                   !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+                       e1000_gate_hw_phy_config_ich8lan(hw, true);
        }
        ret_val = e1000_acquire_swflag_ich8lan(hw);
        e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 {
        u32 phy_ctrl;
+       s32 ret_val;
 
        phy_ctrl = er32(PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
        ew32(PHY_CTRL, phy_ctrl);
 
-       if (hw->mac.type >= e1000_pchlan)
-               e1000_phy_hw_reset_ich8lan(hw);
+       if (hw->mac.type >= e1000_pchlan) {
+               e1000_oem_bits_config_ich8lan(hw, true);
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return;
+               e1000_write_smbus_addr(hw);
+               hw->phy.ops.release(hw);
+       }
 }
 
 /**
index 2b8ef44bd2b1629c5d203206479a3b8bab510fab..e561d15c3eb161558f9a7da3825f3cc6c90840c7 100644 (file)
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        u32 psrctl = 0;
        u32 pages = 0;
 
+       /* Workaround Si errata on 82579 - configure jumbo frame flow */
+       if (hw->mac.type == e1000_pch2lan) {
+               s32 ret_val;
+
+               if (adapter->netdev->mtu > ETH_DATA_LEN)
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+               else
+                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+       }
+
        /* Program MC offset vector base */
        rctl = er32(RCTL);
        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                e1e_wphy(hw, 22, phy_data);
        }
 
-       /* Workaround Si errata on 82579 - configure jumbo frame flow */
-       if (hw->mac.type == e1000_pch2lan) {
-               s32 ret_val;
-
-               if (rctl & E1000_RCTL_LPE)
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
-               else
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-       }
-
        /* Setup buffer sizes */
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
+       /* Jumbo frame workaround on 82579 requires CRC be stripped */
+       if ((adapter->hw.mac.type == e1000_pch2lan) &&
+           !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+           (new_mtu > ETH_DATA_LEN)) {
+               e_err("Jumbo Frames not supported on 82579 when CRC "
+                     "stripping is disabled.\n");
+               return -EINVAL;
+       }
+
        /* 82573 Errata 17 */
        if (((adapter->hw.mac.type == e1000_82573) ||
             (adapter->hw.mac.type == e1000_82574)) &&
index a333b42111b8c2ba20b92eca94bf5648704c4f9a..6372610ed24093b8ed99fdf002e44442be49f696 100644 (file)
@@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev,
        int length = cqe->num_bytes_transfered - 4;     /*remove CRC */
 
        skb_put(skb, length);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->protocol = eth_type_trans(skb, dev);
+
+       /* The packet was not an IPv4 packet, so a complemented checksum was
+          calculated. The value is found in the Internet Checksum field. */
+       if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = csum_unfold(~cqe->inet_checksum_value);
+       } else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
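
The new branch tells the stack how much checksum work the adapter already did. For IPv4 frames the hardware has verified the checksum itself, so CHECKSUM_UNNECESSARY still applies; for everything else it only reports a complemented ones-complement sum over the packet, which the driver folds into skb->csum and flags as CHECKSUM_COMPLETE so the stack can finish verification for any protocol. A condensed sketch of that classification (function name illustrative):

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    static void demo_set_rx_csum(struct sk_buff *skb, bool blind_cksum,
                                 __sum16 hw_sum)
    {
            if (blind_cksum) {
                    /* hardware reports the complement of the Internet
                     * checksum; undo it and let the stack verify */
                    skb->ip_summed = CHECKSUM_COMPLETE;
                    skb->csum = csum_unfold(~hw_sum);
            } else {
                    /* hardware already validated the IPv4 checksum */
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
            }
    }
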
index f608a6c54af5845727494c9749e2b786390dfa78..38104734a3be82b0fb4480526295510a4769e8bf 100644 (file)
@@ -150,6 +150,7 @@ struct ehea_rwqe {
 #define EHEA_CQE_TYPE_RQ           0x60
 #define EHEA_CQE_STAT_ERR_MASK     0x700F
 #define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
+#define EHEA_CQE_BLIND_CKSUM       0x8000
 #define EHEA_CQE_STAT_ERR_TCP      0x4000
 #define EHEA_CQE_STAT_ERR_IP       0x2000
 #define EHEA_CQE_STAT_ERR_CRC      0x1000
index dda2c7944da9a45872d55f626503d15c67c67641..0cb1cf9cf4b0c2c38abd69f964838f276c1a6ff9 100644 (file)
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
        equalizer_t *eql;
        master_config_t mc;
 
+       memset(&mc, 0, sizeof(master_config_t));
+
        if (eql_is_master(dev)) {
                eql = netdev_priv(dev);
                mc.max_slaves = eql->max_slaves;
index 768b840aeb6b7b0bf2ceec87469bb0b4925b5d20..cce32d43175f5c3ed5f6e7ef2ebd56eede8e9139 100644 (file)
@@ -678,24 +678,37 @@ static int fec_enet_mii_probe(struct net_device *dev)
 {
        struct fec_enet_private *fep = netdev_priv(dev);
        struct phy_device *phy_dev = NULL;
-       int ret;
+       char mdio_bus_id[MII_BUS_ID_SIZE];
+       char phy_name[MII_BUS_ID_SIZE + 3];
+       int phy_id;
 
        fep->phy_dev = NULL;
 
-       /* find the first phy */
-       phy_dev = phy_find_first(fep->mii_bus);
-       if (!phy_dev) {
-               printk(KERN_ERR "%s: no PHY found\n", dev->name);
-               return -ENODEV;
+       /* check for attached phy */
+       for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+               if ((fep->mii_bus->phy_mask & (1 << phy_id)))
+                       continue;
+               if (fep->mii_bus->phy_map[phy_id] == NULL)
+                       continue;
+               if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
+                       continue;
+               strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+               break;
        }
 
-       /* attach the mac to the phy */
-       ret = phy_connect_direct(dev, phy_dev,
-                            &fec_enet_adjust_link, 0,
-                            PHY_INTERFACE_MODE_MII);
-       if (ret) {
-               printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
-               return ret;
+       if (phy_id >= PHY_MAX_ADDR) {
+               printk(KERN_INFO "%s: no PHY, assuming direct connection "
+                       "to switch\n", dev->name);
+               strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
+               phy_id = 0;
+       }
+
+       snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
+       phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+               PHY_INTERFACE_MODE_MII);
+       if (IS_ERR(phy_dev)) {
+               printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+               return PTR_ERR(phy_dev);
        }
 
        /* mask with MAC supported features */
@@ -738,7 +751,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
        fep->mii_bus->reset = fec_enet_mdio_reset;
-       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
        fep->mii_bus->priv = fep;
        fep->mii_bus->parent = &pdev->dev;
 
@@ -1311,6 +1324,9 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_mii_init;
 
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(ndev);
+
        ret = register_netdev(ndev);
        if (ret)
                goto failed_register;
index 4b52c767ad056ced9aafcafea8299384b807c169..3e5d0b6b6516133039192fa5dfd1fdf88660d81b 100644 (file)
@@ -608,7 +608,7 @@ static int sixpack_open(struct tty_struct *tty)
 
        spin_lock_init(&sp->lock);
        atomic_set(&sp->refcnt, 1);
-       init_MUTEX_LOCKED(&sp->dead_sem);
+       sema_init(&sp->dead_sem, 0);
 
        /* !!! length of the buffers. MTU is IP MTU, not PACLEN!  */
 
index 66e88bd59caada26f9cdaa4b6dbb7c1a78026e1f..4c628393c8b157cbc09de52d902b5fa8c3d370a3 100644 (file)
@@ -747,7 +747,7 @@ static int mkiss_open(struct tty_struct *tty)
 
        spin_lock_init(&ax->buflock);
        atomic_set(&ax->refcnt, 1);
-       init_MUTEX_LOCKED(&ax->dead_sem);
+       sema_init(&ax->dead_sem, 0);
 
        ax->tty = tty;
        tty->disc_data = ax;
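
This hunk and the previous one (plus the matching changes in sir_dev and ppp_async further down) are part of retiring the old init_MUTEX helpers: a semaphore is now always initialized with an explicit count, where 1 reproduces init_MUTEX() (available) and 0 reproduces init_MUTEX_LOCKED() (held until a later up()). The equivalence as a small sketch:

    #include <linux/semaphore.h>

    static void demo_init_sems(struct semaphore *fsm_sem,
                               struct semaphore *dead_sem)
    {
            sema_init(fsm_sem, 1);   /* was init_MUTEX(): starts unlocked */
            sema_init(dead_sem, 0);  /* was init_MUTEX_LOCKED(): starts
                                      * locked, released by up() later */
    }
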
index 3506fd6ad7263be467b9128938b248bbaf3dd054..519e19e23955a3c3a86d8e04f615b67daa7a996e 100644 (file)
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
  err_free:
-       kfree(ndev);
+       free_netdev(ndev);
  err_gone:
        /* if we were on the bootlist, remove us as we won't show up and
         * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
        if (dev->emac_irq != NO_IRQ)
                irq_dispose_mapping(dev->emac_irq);
 
-       kfree(dev->ndev);
+       free_netdev(dev->ndev);
 
        return 0;
 }
index 1b051dab7b298a761a5d9e4f2ff38af16f062238..51d74447f8f8cb7d428c7bbdf22b14f71cdfc4aa 100644 (file)
@@ -909,7 +909,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
        dev->tx_skb = NULL;
 
        spin_lock_init(&dev->tx_lock);
-       init_MUTEX(&dev->fsm.sem);
+       sema_init(&dev->fsm.sem, 1);
 
        dev->drv = drv;
        dev->netdev = ndev;
index b4fb07a6f13ffd489c957816eebb4f831157ccd1..51919fcd50c26e2c0c6b8c23ba393887eca50254 100644 (file)
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                ks8851_wrreg16(ks, KS_RXQCR,
                               ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
 
-               if (rxlen > 0) {
-                       skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
-                       if (!skb) {
-                               /* todo - dump frame and move on */
-                       }
+               if (rxlen > 4) {
+                       unsigned int rxalign;
+
+                       rxlen -= 4;
+                       rxalign = ALIGN(rxlen, 4);
+                       skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+                       if (skb) {
 
-                       /* two bytes to ensure ip is aligned, and four bytes
-                        * for the status header and 4 bytes of garbage */
-                       skb_reserve(skb, 2 + 4 + 4);
+                               /* 4 bytes of status header + 4 bytes of
+                                * garbage: we put them before the
+                                * Ethernet header, so that they are
+                                * copied but ignored.
+                                */
 
-                       rxpkt = skb_put(skb, rxlen - 4) - 8;
+                               rxpkt = skb_put(skb, rxlen) - 8;
 
-                       /* align the packet length to 4 bytes, and add 4 bytes
-                        * as we're getting the rx status header as well */
-                       ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+                               ks8851_rdfifo(ks, rxpkt, rxalign + 8);
 
-                       if (netif_msg_pktdata(ks))
-                               ks8851_dbg_dumpkkt(ks, rxpkt);
+                               if (netif_msg_pktdata(ks))
+                                       ks8851_dbg_dumpkkt(ks, rxpkt);
 
-                       skb->protocol = eth_type_trans(skb, ks->netdev);
-                       netif_rx(skb);
+                               skb->protocol = eth_type_trans(skb, ks->netdev);
+                               netif_rx(skb);
 
-                       ks->netdev->stats.rx_packets++;
-                       ks->netdev->stats.rx_bytes += rxlen - 4;
+                               ks->netdev->stats.rx_packets++;
+                               ks->netdev->stats.rx_bytes += rxlen;
+                       }
                }
 
                ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
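
The rewritten receive loop drops the 4-byte CRC from the length the chip reports, rounds the FIFO read up to the controller's 32-bit access granularity, and drains the 8 bytes of status header and garbage that precede the frame into the skb's headroom, so they are read but ignored. The buffer arithmetic, as an illustrative helper:

    #include <linux/kernel.h>        /* ALIGN() */

    static unsigned int demo_rx_burst(unsigned int hw_len)
    {
            unsigned int rxlen   = hw_len - 4;       /* strip CRC        */
            unsigned int rxalign = ALIGN(rxlen, 4);  /* 32-bit FIFO ops  */

            /* + 4-byte status header + 4 bytes of garbage prefix */
            return rxalign + 8;
    }
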
index bdf2149e529689b1135603904da6343fcbbbebc2..87f0a93b165c33478e1d59aa5e23942afaccc80f 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
index 5ae28c975b384bf64ff7a7dc763a3d1766669b5a..8cf9d4f56bb2223893dd912e1d475a48fc30f683 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/of_mdio.h>
 
index cabae7bb1fc6777d3366c5a8728feadcd53d0aa3..b075a35b85d4ef09cfab1b2df3b42be0313c2516 100644 (file)
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
 
        napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
 
index bc695d53cdccbcca3eb1c2ccb30018acb3c8bde6..fe6983af6918fe37cdb5589907346557ddeeb8df 100644 (file)
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
        struct niu_parent *parent = np->parent;
        struct niu_tcam_entry *tp;
        int i, idx, cnt;
-       u16 n_entries;
        unsigned long flags;
-
+       int ret = 0;
 
        /* put the tcam size here */
        nfc->data = tcam_get_size(np);
 
        niu_lock_parent(np, flags);
-       n_entries = nfc->rule_cnt;
        for (cnt = 0, i = 0; i < nfc->data; i++) {
                idx = tcam_get_index(np, i);
                tp = &parent->tcam[idx];
                if (!tp->valid)
                        continue;
+               if (cnt == nfc->rule_cnt) {
+                       ret = -EMSGSIZE;
+                       break;
+               }
                rule_locs[cnt] = i;
                cnt++;
        }
        niu_unlock_parent(np, flags);
 
-       if (n_entries != cnt) {
-               /* print warning, this should not happen */
-               netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
-                           np->parent->index, __func__, n_entries, cnt);
-       }
-
-       return 0;
+       return ret;
 }
 
 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
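
Rather than filling rule_locs[] to the number of valid TCAM entries and then merely warning when that disagrees with the caller's rule_cnt, the loop now stops at the caller's bound and reports -EMSGSIZE, so user space learns its array was too small and nothing is written past it. The general shape (names illustrative):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int demo_collect(const bool *valid, int total,
                            u32 *out, int out_cnt)
    {
            int i, cnt = 0;

            for (i = 0; i < total; i++) {
                    if (!valid[i])
                            continue;
                    if (cnt == out_cnt)
                            return -EMSGSIZE;  /* caller's array is full */
                    out[cnt++] = i;
            }
            return 0;
    }
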
index 49279b0ee526a54a534c6f8c04e7c48aa4aba0d0..f9b509a6b09a702d214ed2821d3bf6a79a671b49 100644 (file)
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
                           unsigned int vcc,
                           void *priv_data)
 {
-       int *has_shmem = priv_data;
+       int *priv = priv_data;
+       int try = (*priv & 0x1);
        int i;
        cistpl_io_t *io = &cfg->io;
 
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
                i = p_dev->resource[1]->end = 0;
        }
 
-       *has_shmem = ((cfg->mem.nwin == 1) &&
-                     (cfg->mem.win[0].len >= 0x4000));
+       *priv &= ((cfg->mem.nwin == 1) &&
+                 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
+
        p_dev->resource[0]->start = io->win[i].base;
        p_dev->resource[0]->end = io->win[i].len;
-       p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+       if (!try)
+               p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+       else
+               p_dev->io_lines = 16;
        if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
                return try_io_port(p_dev);
 
-       return 0;
+       return -EINVAL;
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+                                  int *has_shmem, int try)
+{
+       struct net_device *dev = link->priv;
+       hw_info_t *local_hw_info;
+       pcnet_dev_t *info = PRIV(dev);
+       int priv = try;
+       int ret;
+
+       ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+       if (ret) {
+               dev_warn(&link->dev, "no usable port range found\n");
+               return NULL;
+       }
+       *has_shmem = (priv & 0x10);
+
+       if (!link->irq)
+               return NULL;
+
+       if (resource_size(link->resource[1]) == 8) {
+               link->conf.Attributes |= CONF_ENABLE_SPKR;
+               link->conf.Status = CCSR_AUDIO_ENA;
+       }
+       if ((link->manf_id == MANFID_IBM) &&
+           (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+               link->conf.ConfigIndex |= 0x10;
+
+       ret = pcmcia_request_configuration(link, &link->conf);
+       if (ret)
+               return NULL;
+
+       dev->irq = link->irq;
+       dev->base_addr = link->resource[0]->start;
+
+       if (info->flags & HAS_MISC_REG) {
+               if ((if_port == 1) || (if_port == 2))
+                       dev->if_port = if_port;
+               else
+                       dev_notice(&link->dev, "invalid if_port requested\n");
+       } else
+               dev->if_port = 0;
+
+       if ((link->conf.ConfigBase == 0x03c0) &&
+           (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+               dev_info(&link->dev,
+                       "this is an AX88190 card - use axnet_cs instead.\n");
+               return NULL;
+       }
+
+       local_hw_info = get_hwinfo(link);
+       if (!local_hw_info)
+               local_hw_info = get_prom(link);
+       if (!local_hw_info)
+               local_hw_info = get_dl10019(link);
+       if (!local_hw_info)
+               local_hw_info = get_ax88190(link);
+       if (!local_hw_info)
+               local_hw_info = get_hwired(link);
+
+       return local_hw_info;
 }
 
 static int pcnet_config(struct pcmcia_device *link)
 {
     struct net_device *dev = link->priv;
     pcnet_dev_t *info = PRIV(dev);
-    int ret, start_pg, stop_pg, cm_offset;
+    int start_pg, stop_pg, cm_offset;
     int has_shmem = 0;
     hw_info_t *local_hw_info;
 
     dev_dbg(&link->dev, "pcnet_config\n");
 
-    ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
-    if (ret)
-       goto failed;
-
-    if (!link->irq)
-           goto failed;
-
-    if (resource_size(link->resource[1]) == 8) {
-       link->conf.Attributes |= CONF_ENABLE_SPKR;
-       link->conf.Status = CCSR_AUDIO_ENA;
-    }
-    if ((link->manf_id == MANFID_IBM) &&
-       (link->card_id == PRODID_IBM_HOME_AND_AWAY))
-       link->conf.ConfigIndex |= 0x10;
-
-    ret = pcmcia_request_configuration(link, &link->conf);
-    if (ret)
-           goto failed;
-    dev->irq = link->irq;
-    dev->base_addr = link->resource[0]->start;
-    if (info->flags & HAS_MISC_REG) {
-       if ((if_port == 1) || (if_port == 2))
-           dev->if_port = if_port;
-       else
-           printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
-    } else {
-       dev->if_port = 0;
-    }
-
-    if ((link->conf.ConfigBase == 0x03c0) &&
-       (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
-       printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
-       printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
-       goto failed;
-    }
-
-    local_hw_info = get_hwinfo(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_prom(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_dl10019(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_ax88190(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_hwired(link);
-
-    if (local_hw_info == NULL) {
-       printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
-              " address for io base %#3lx\n", dev->base_addr);
-       goto failed;
+    local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+    if (!local_hw_info) {
+           /* check whether forcing io_lines to 16 helps... */
+           pcmcia_disable_device(link);
+           local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+           if (local_hw_info == NULL) {
+                   dev_notice(&link->dev, "unable to read hardware net"
+                           " address for io base %#3lx\n", dev->base_addr);
+                   goto failed;
+           }
     }
 
     info->flags = local_hw_info->flags;
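
pcmcia_loop_config() hands the callback a single void *priv, so the reworked probe packs both directions of information into one int: bit 0 carries the input flag for the second attempt (force io_lines to 16) and bit 4 carries back whether a usable shared-memory window (>= 16K) was seen. A sketch of that cookie convention; the macro and function names are invented for illustration, and the sketch uses a plain set-bit rather than the driver's mask expression:

    #include <linux/types.h>

    #define DEMO_TRY_IO16   0x01    /* in:  retry with io_lines = 16 */
    #define DEMO_HAS_SHMEM  0x10    /* out: >= 16K memory window     */

    static int demo_confcheck(int *priv, bool shmem_ok)
    {
            int io_lines = (*priv & DEMO_TRY_IO16) ? 16 : 0;

            if (shmem_ok)
                    *priv |= DEMO_HAS_SHMEM;  /* report back to caller */

            return io_lines;  /* 0: take the width from the CIS */
    }
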
index 6a6b8199a0d6862467260384e32979584f24d95a..6c58da2b882c845e453ddd7b6bb0b544f4705b83 100644 (file)
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
         * may call phy routines that try to grab the same lock, and that may
         * lead to a deadlock.
         */
-       if (phydev->attached_dev)
+       if (phydev->attached_dev && phydev->adjust_link)
                phy_stop_machine(phydev);
 
        if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
                return ret;
 
 no_resume:
-       if (phydev->attached_dev)
+       if (phydev->attached_dev && phydev->adjust_link)
                phy_start_machine(phydev, NULL);
 
        return 0;
index af50a530daee25bf3debf8493690861c2ea2fcc2..78d70a6481bfa7f986e165e427b8c76bd75ae835 100644 (file)
@@ -184,7 +184,7 @@ ppp_asynctty_open(struct tty_struct *tty)
        tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
 
        atomic_set(&ap->refcnt, 1);
-       init_MUTEX_LOCKED(&ap->dead_sem);
+       sema_init(&ap->dead_sem, 0);
 
        ap->chan.private = ap;
        ap->chan.ops = &async_ops;
index 6695a51e09e9b86340aa0cee342f2fb8d99e1159..736b91703b3e11578eda930e7b119a06f96c0fc6 100644 (file)
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
        hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
        i = 0;
        list_for_each_entry(pch, &ppp->channels, clist) {
-               navail += pch->avail = (pch->chan != NULL);
-               pch->speed = pch->chan->speed;
+               if (pch->chan) {
+                       pch->avail = 1;
+                       navail++;
+                       pch->speed = pch->chan->speed;
+               } else {
+                       pch->avail = 0;
+               }
                if (pch->avail) {
                        if (skb_queue_empty(&pch->file.xq) ||
                                !pch->had_frag) {
index 75ba744b173c89d0f598d1d61183fd5d9dca1675..2c7cf0b64811ed72d4d6368c02d46fa966e7de2f 100644 (file)
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
                return -ENOMEM;
        }
 
-       skb_reserve(skb, 2);
+       skb_reserve(skb, NET_IP_ALIGN);
 
        dma = pci_map_single(pdev, skb->data,
                        rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
 
        napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
        skb_pull(skb, l2_hdr_offset);
        skb->protocol = eth_type_trans(skb, netdev);
 
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
-
        if (!qlcnic_check_loopback_buff(skb->data))
                adapter->diag_cnt++;
 
index 078bbf4e6f1933f3ee1e95e2c0dccfea92157088..992db2fa136e9c5e6f5130c053393aa662d4e5c9 100644 (file)
@@ -1212,7 +1212,8 @@ static void rtl8169_update_counters(struct net_device *dev)
        if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
                return;
 
-       counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+       counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+                                     &paddr, GFP_KERNEL);
        if (!counters)
                return;
 
@@ -1233,7 +1234,8 @@ static void rtl8169_update_counters(struct net_device *dev)
        RTL_W32(CounterAddrLow, 0);
        RTL_W32(CounterAddrHigh, 0);
 
-       pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+       dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+                         paddr);
 }
 
 static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -2934,7 +2936,7 @@ static const struct rtl_cfg_info {
                .hw_start       = rtl_hw_start_8168,
                .region         = 2,
                .align          = 8,
-               .intr_event     = SYSErr | LinkChg | RxOverflow |
+               .intr_event     = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
                                  TxErr | TxOK | RxOK | RxErr,
                .napi_event     = TxErr | TxOK | RxOK | RxOverflow,
                .features       = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3292,15 +3294,15 @@ static int rtl8169_open(struct net_device *dev)
 
        /*
         * Rx and Tx descriptors need 256-byte alignment.
-        * pci_alloc_consistent provides more.
+        * dma_alloc_coherent provides more.
         */
-       tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
-                                              &tp->TxPhyAddr);
+       tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+                                            &tp->TxPhyAddr, GFP_KERNEL);
        if (!tp->TxDescArray)
                goto err_pm_runtime_put;
 
-       tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
-                                              &tp->RxPhyAddr);
+       tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+                                            &tp->RxPhyAddr, GFP_KERNEL);
        if (!tp->RxDescArray)
                goto err_free_tx_0;
 
@@ -3334,12 +3336,12 @@ out:
 err_release_ring_2:
        rtl8169_rx_clear(tp);
 err_free_rx_1:
-       pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-                           tp->RxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+                         tp->RxPhyAddr);
        tp->RxDescArray = NULL;
 err_free_tx_0:
-       pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-                           tp->TxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+                         tp->TxPhyAddr);
        tp->TxDescArray = NULL;
 err_pm_runtime_put:
        pm_runtime_put_noidle(&pdev->dev);
@@ -3975,7 +3977,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
 {
        struct pci_dev *pdev = tp->pci_dev;
 
-       pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
                         PCI_DMA_FROMDEVICE);
        dev_kfree_skb(*sk_buff);
        *sk_buff = NULL;
@@ -4000,7 +4002,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
 static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
                                            struct net_device *dev,
                                            struct RxDesc *desc, int rx_buf_sz,
-                                           unsigned int align)
+                                           unsigned int align, gfp_t gfp)
 {
        struct sk_buff *skb;
        dma_addr_t mapping;
@@ -4008,13 +4010,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
 
        pad = align ? align : NET_IP_ALIGN;
 
-       skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+       skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
        if (!skb)
                goto err_out;
 
        skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
 
-       mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+       mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
 
        rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4039,7 +4041,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
 }
 
 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
-                          u32 start, u32 end)
+                          u32 start, u32 end, gfp_t gfp)
 {
        u32 cur;
 
@@ -4054,7 +4056,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
 
                skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
                                           tp->RxDescArray + i,
-                                          tp->rx_buf_sz, tp->align);
+                                          tp->rx_buf_sz, tp->align, gfp);
                if (!skb)
                        break;
 
@@ -4082,7 +4084,7 @@ static int rtl8169_init_ring(struct net_device *dev)
        memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
        memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
 
-       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
                goto err_out;
 
        rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4099,7 +4101,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
 {
        unsigned int len = tx_skb->len;
 
-       pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+                        PCI_DMA_TODEVICE);
        desc->opts1 = 0x00;
        desc->opts2 = 0x00;
        desc->addr = 0x00;
@@ -4243,7 +4246,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
                txd = tp->TxDescArray + entry;
                len = frag->size;
                addr = ((void *) page_address(frag->page)) + frag->page_offset;
-               mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+               mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+                                        PCI_DMA_TODEVICE);
 
                /* anti gcc 2.95.3 bugware (sic) */
                status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4313,7 +4317,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
                tp->tx_skb[entry].skb = skb;
        }
 
-       mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+       mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+                                PCI_DMA_TODEVICE);
 
        tp->tx_skb[entry].len = len;
        txd->addr = cpu_to_le64(mapping);
@@ -4477,8 +4482,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
        if (!skb)
                goto out;
 
-       pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
-                                   PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
+                               PCI_DMA_FROMDEVICE);
        skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
        *sk_buff = skb;
        done = true;
@@ -4549,11 +4554,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                        rtl8169_rx_csum(skb, desc);
 
                        if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
-                               pci_dma_sync_single_for_device(pdev, addr,
+                               dma_sync_single_for_device(&pdev->dev, addr,
                                        pkt_size, PCI_DMA_FROMDEVICE);
                                rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
                        } else {
-                               pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+                               dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                        }
@@ -4583,7 +4588,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
        count = cur_rx - tp->cur_rx;
        tp->cur_rx = cur_rx;
 
-       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
        if (!delta && count)
                netif_info(tp, intr, dev, "no Rx buffer allocated\n");
        tp->dirty_rx += delta;
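
rtl8169_rx_fill() gains a gfp_t precisely for the split visible here: the open path refills with GFP_KERNEL, while this refill runs from the rx interrupt and must not sleep, hence GFP_ATOMIC; __netdev_alloc_skb() simply forwards the flag. A sketch of the plumbing (refill_rx is an illustrative wrapper, not in the patch):

    static u32 refill_rx(struct rtl8169_private *tp, struct net_device *dev,
                         bool atomic_ctx)
    {
            gfp_t gfp = atomic_ctx ? GFP_ATOMIC : GFP_KERNEL;

            return rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, gfp);
    }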
@@ -4625,8 +4630,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                }
 
                /* Workaround for rx fifo overflow */
-               if (unlikely(status & RxFIFOOver) &&
-               (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+               if (unlikely(status & RxFIFOOver)) {
                        netif_stop_queue(dev);
                        rtl8169_tx_timeout(dev);
                        break;
@@ -4770,10 +4774,10 @@ static int rtl8169_close(struct net_device *dev)
 
        free_irq(dev->irq, dev);
 
-       pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-                           tp->RxPhyAddr);
-       pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-                           tp->TxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+                         tp->RxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+                         tp->TxPhyAddr);
        tp->TxDescArray = NULL;
        tp->RxDescArray = NULL;
 
index 07eb884ff982405c3d15204a04d6188394e329f3..44150f2f7bfd6b206b17e0a64064d5996d0033ea 100644 (file)
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
        free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
                                        __ilog2(sizeof(void *)) + 4 : 0);
        unregister_netdev(ndev);
-       kfree(ndev);
+       free_netdev(ndev);
 
        list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
                list_del(&peer->node);
index cc4bd8c65f8b6a58846065430f8b89c1be6a4bc3..9265315baa0b29bdc2c7a9e20db20462dcf52ae5 100644 (file)
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
 err_out_free_page:
        free_page((unsigned long) sp->srings);
 err_out_free_dev:
-       kfree(dev);
+       free_netdev(dev);
 
 err_out:
        return err;
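
Both this sgiseeq fix and the rionet fix above correct the same error-path bug: a struct net_device obtained from alloc_etherdev()/alloc_netdev() carries a reference count and an embedded private area and must be returned through free_netdev(); a bare kfree() can free memory that is still referenced. The canonical teardown order, sketched:

    static void demo_teardown(struct net_device *ndev)
    {
            unregister_netdev(ndev);    /* detach from the stack first */
            free_netdev(ndev);          /* then a refcount-aware release */
    }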
index 40e5c46e7571ad46f1c7abf655d0235f89762bfd..465ae7e84507385b079c7922cb7360cec428a30f 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/seq_file.h>
 #include <linux/mii.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 #include <asm/irq.h>
 
 #include "skge.h"
@@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
        netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
 }
 
+static int only_32bit_dma;
+
 static int __devinit skge_probe(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
 {
@@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = {
        .shutdown =     skge_shutdown,
 };
 
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+       {
+               .ident = "Gigabyte nForce boards",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+                       DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+               },
+       },
+       {}
+};
+
 static int __init skge_init_module(void)
 {
+       if (dmi_check_system(skge_32bit_dma_boards))
+               only_32bit_dma = 1;
        skge_debug_init();
        return pci_register_driver(&skge_driver);
 }
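
dmi_check_system() returns the number of table entries whose DMI_MATCH() substrings all hold against the firmware-provided identification strings, so any hit flips only_32bit_dma before pci_register_driver() can probe a device. A generic sketch of such a table (the entry contents here are placeholders, not the boards the patch targets):

    #include <linux/dmi.h>

    static const struct dmi_system_id demo_boards[] = {
            {
                    .ident = "Example vendor board",        /* illustrative */
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "Example"),
                            DMI_MATCH(DMI_BOARD_NAME, "Board"),
                    },
            },
            { }     /* zeroed terminator is required */
    };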
index 0909ae934ad0fcee52c5eb67101240e77fb70dde..8150ba1541161ab2e7781ade13be970540f835ec 100644 (file)
@@ -58,6 +58,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
 
 #if USE_DEBUG > 0
 static int debug = 16;
index bbb7951b9c4c34bb3992deed9c7a8e84ff52f68e..ea0461eb2dbe4314c223ab2ad47f17ea9b90e740 100644 (file)
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev)
        if (!netif_running(dev))
                return 0;
 
-       spin_lock(&priv->lock);
-
        if (priv->shutdown) {
                /* Re-open the interface and re-init the MAC/DMA
-                  and the rings. */
+                  and the rings (i.e. on the hibernation path). */
                stmmac_open(dev);
-               goto out_resume;
+               return 0;
        }
 
+       spin_lock(&priv->lock);
+
        /* Power Down bit, into the PM register, is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
 
        netif_start_queue(dev);
 
-out_resume:
        spin_unlock(&priv->lock);
        return 0;
 }
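
The reordering matters because stmmac_open() sleeps (it allocates the DMA rings and requests the IRQ), which is forbidden under a held spinlock; returning early on the hibernation path before taking priv->lock also lets the out_resume label go away. The resulting shape, sketched:

    static int demo_resume(struct stmmac_priv *priv, struct net_device *dev)
    {
            if (priv->shutdown) {
                    stmmac_open(dev);       /* may sleep: rings + IRQ setup */
                    return 0;               /* lock never taken on this path */
            }

            spin_lock(&priv->lock);
            /* ... atomic register-level resume ... */
            spin_unlock(&priv->lock);
            return 0;
    }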
index bc3af78a869ff52881077b89cf6e5e544b6e8a91..1ec4b9e0239a8ff8c0d3cb18882ff517ef528881 100644 (file)
@@ -4666,7 +4666,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
-                       tp->net_stats.rx_dropped++;
+                       tp->rx_dropped++;
                        goto next_pkt;
                }
 
@@ -4726,7 +4726,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                if (len > (tp->dev->mtu + ETH_HLEN) &&
                    skb->protocol != htons(ETH_P_8021Q)) {
                        dev_kfree_skb(skb);
-                       goto next_pkt;
+                       goto drop_it_no_recycle;
                }
 
                if (desc->type_flags & RXD_FLAG_VLAN &&
@@ -9240,6 +9240,8 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);
 
+       stats->rx_dropped = tp->rx_dropped;
+
        return stats;
 }
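
tg3 stops mirroring a full rtnl_link_stats64 (see the header hunk below) and keeps a single software counter instead: frames the driver itself drops, now including the oversized-frame case that jumps to drop_it_no_recycle, accumulate in tp->rx_dropped and are folded in at report time. A sketch of the fold-in (demo_get_stats64 is illustrative):

    static struct rtnl_link_stats64 *demo_get_stats64(struct net_device *dev,
                                            struct rtnl_link_stats64 *stats)
    {
            struct tg3 *tp = netdev_priv(dev);

            /* ... hardware counters copied into *stats ... */
            stats->rx_dropped = tp->rx_dropped;     /* software-only tally */
            return stats;
    }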
 
index 4937bd19096413bae1115b82cc63ce5123207536..be7ff138a7f98d58a4f4d4c8ff7fe70bb83911d9 100644 (file)
@@ -2759,7 +2759,7 @@ struct tg3 {
 
 
        /* begin "everything else" cacheline(s) section */
-       struct rtnl_link_stats64        net_stats;
+       unsigned long                   rx_dropped;
        struct rtnl_link_stats64        net_stats_prev;
        struct tg3_ethtool_stats        estats;
        struct tg3_ethtool_stats        estats_prev;
index 5efa57757a2c8507f5bcf0d0d7f3ac5d1b5a26c3..6888e3d41462081952c7320b501736f03230ba78 100644 (file)
@@ -243,6 +243,7 @@ enum {
        NWayState               = (1 << 14) | (1 << 13) | (1 << 12),
        NWayRestart             = (1 << 12),
        NonselPortActive        = (1 << 9),
+       SelPortActive           = (1 << 8),
        LinkFailStatus          = (1 << 2),
        NetCxnErr               = (1 << 1),
 };
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
 
 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
 
 
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
        unsigned int carrier;
        unsigned long flags;
 
+       /* clear port active bits */
+       dw32(SIAStatus, NonselPortActive | SelPortActive);
+
        carrier = (status & NetCxnErr) ? 0 : 1;
 
        if (carrier) {
@@ -1158,14 +1164,29 @@ no_link_yet:
 static void de_media_interrupt (struct de_private *de, u32 status)
 {
        if (status & LinkPass) {
+               /* Ignore if current media is AUI or BNC and we can't use TP */
+               if ((de->media_type == DE_MEDIA_AUI ||
+                    de->media_type == DE_MEDIA_BNC) &&
+                   (de->media_lock ||
+                    !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+                       return;
+               /* If current media is not TP, change it to TP */
+               if ((de->media_type == DE_MEDIA_AUI ||
+                    de->media_type == DE_MEDIA_BNC)) {
+                       de->media_type = DE_MEDIA_TP_AUTO;
+                       de_stop_rxtx(de);
+                       de_set_media(de);
+                       de_start_rxtx(de);
+               }
                de_link_up(de);
                mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
                return;
        }
 
        BUG_ON(!(status & LinkFail));
-
-       if (netif_carrier_ok(de->dev)) {
+       /* Mark the link as down only if current media is TP */
+       if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+           de->media_type != DE_MEDIA_BNC) {
                de_link_down(de);
                mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
        }
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
        if (de->de21040)
                return;
 
+       dw32(CSR13, 0); /* Reset phy */
        pci_read_config_dword(de->pdev, PCIPM, &pmctl);
        pmctl |= PM_Sleep;
        pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
                return 0; /* nothing to change */
 
        de_link_down(de);
+       mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
        de_stop_rxtx(de);
 
        de->media_type = new_media;
        de->media_lock = media_lock;
        de->media_advertise = ecmd->advertising;
        de_set_media(de);
+       if (netif_running(de->dev))
+               de_start_rxtx(de);
 
        return 0;
 }
@@ -1911,8 +1936,14 @@ fill_defaults:
        for (i = 0; i < DE_MAX_MEDIA; i++) {
                if (de->media[i].csr13 == 0xffff)
                        de->media[i].csr13 = t21041_csr13[i];
-               if (de->media[i].csr14 == 0xffff)
-                       de->media[i].csr14 = t21041_csr14[i];
+               if (de->media[i].csr14 == 0xffff) {
+                       /* autonegotiation is broken at least on some chip
+                          revisions - rev. 0x21 works, 0x11 does not */
+                       if (de->pdev->revision < 0x20)
+                               de->media[i].csr14 = t21041_csr14_brk[i];
+                       else
+                               de->media[i].csr14 = t21041_csr14[i];
+               }
                if (de->media[i].csr15 == 0xffff)
                        de->media[i].csr15 = t21041_csr15[i];
        }
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev)
                dev_err(&dev->dev, "pci_enable_device failed in resume\n");
                goto out;
        }
+       pci_set_master(pdev);
+       de_init_rings(de);
        de_init_hw(de);
 out_attach:
        netif_device_attach(dev);
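
The de2104x resume path previously restarted the chip with stale descriptor rings and without re-enabling bus mastering, either of which can leave DMA dead after a suspend/resume cycle. The repaired order, sketched (demo_resume is illustrative; error handling elided):

    static void demo_resume(struct de_private *de, struct pci_dev *pdev)
    {
            if (pci_enable_device(pdev))
                    return;
            pci_set_master(pdev);   /* bus mastering is lost across D3hot */
            de_init_rings(de);      /* rebuild descriptors before re-init */
            de_init_hw(de);
    }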
index 6efca66b87663a84ec7f36529c190aa5a107d401..1cd752f9a6e1e8586f3eda9fcb854ac0226de67a 100644 (file)
@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial,
        struct uart_icount cnow;
        struct hso_tiocmget  *tiocmget = serial->tiocmget;
 
+       memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
        if (!tiocmget)
                 return -ENOENT;
        spin_lock_irq(&serial->serial_lock);
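
This is an information-leak fix: icount lives on the kernel stack and is later copied to userspace in one piece, so padding bytes and any fields hso_get_count never sets would otherwise expose stale stack contents. The general pattern, sketched (demo_get_count, argp and the field selection are illustrative):

    static int demo_get_count(struct uart_icount *cnow,
                              struct serial_icounter_struct __user *argp)
    {
            struct serial_icounter_struct icount;

            memset(&icount, 0, sizeof(icount));     /* no stack bytes escape */
            icount.rx = cnow->rx;
            icount.tx = cnow->tx;
            /* ... remaining tracked fields ... */
            return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
    }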
index 8ed30fa35d0a5d789121eb3042c4b87a5e53a7f2..b2bcf99e6f087ab1dfca2aef092233eea9aa5fe6 100644 (file)
@@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
        .ndo_get_stats = &ipheth_stats,
 };
 
-static struct device_type ipheth_type = {
-       .name   = "wwan",
-};
-
 static int ipheth_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
@@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
        netdev->netdev_ops = &ipheth_netdev_ops;
        netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-       strcpy(netdev->name, "wwan%d");
+       strcpy(netdev->name, "eth%d");
 
        dev = netdev_priv(netdev);
        dev->udev = udev;
@@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
 
        SET_NETDEV_DEV(netdev, &intf->dev);
        SET_ETHTOOL_OPS(netdev, &ops);
-       SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
 
        retval = register_netdev(netdev);
        if (retval) {
index fd69095ef6e33d61698556abac79a4e84429c8fe..f53412368ce1e1745a7796a8cc8181b16027e797 100644 (file)
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
        ret = register_netdev(dev);
        if (ret < 0)
index 04c6cd4333f1ec83d69be506b040b75183cdfb81..10bafd59f9c336e36fbec4c6703530ce54a92a36 100644 (file)
@@ -575,7 +575,7 @@ static int cosa_probe(int base, int irq, int dma)
 
                /* Initialize the chardev data structures */
                mutex_init(&chan->rlock);
-               init_MUTEX(&chan->wsem);
+               sema_init(&chan->wsem, 1);
 
                /* Register the network interface */
                if (!(chan->netdev = alloc_hdlcdev(chan))) {
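
init_MUTEX() and init_MUTEX_LOCKED() were being removed from the semaphore API at this point; both are spellings of sema_init() with an explicit initial count, as the parport hunk further down also shows for the count-0 form. The equivalences, sketched:

    #include <linux/semaphore.h>

    static void sem_init_demo(void)
    {
            static struct semaphore wsem, irq_sem;

            sema_init(&wsem, 1);     /* was init_MUTEX(&wsem): first down() succeeds */
            sema_init(&irq_sem, 0);  /* was init_MUTEX_LOCKED(): starts held */
    }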
index 8cc9e319f4356da8904cbf4e7a495d59eba94645..1737d1488b35704f3196975a76bcc919e943808b 100644 (file)
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
        int i, result;
        struct device *dev = i2400m_dev(i2400m);
        const struct i2400m_msg_hdr *msg_hdr;
-       size_t pl_itr, pl_size, skb_len;
+       size_t pl_itr, pl_size;
        unsigned long flags;
-       unsigned num_pls, single_last;
+       unsigned num_pls, single_last, skb_len;
 
        skb_len = skb->len;
-       d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+       d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
                  i2400m, skb, skb_len);
        result = -EIO;
        msg_hdr = (void *) skb->data;
-       result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+       result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
        if (result < 0)
                goto error_msg_hdr_check;
        result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
        pl_itr = sizeof(*msg_hdr) +     /* Check payload descriptor(s) */
                num_pls * sizeof(msg_hdr->pld[0]);
        pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
-       if (pl_itr > skb->len) {        /* got all the payload descriptors? */
+       if (pl_itr > skb_len) { /* got all the payload descriptors? */
                dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
                        "%u payload descriptors (%zu each, total %zu)\n",
-                       skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+                       skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
                goto error_pl_descr_short;
        }
        /* Walk each payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
                /* work around old gcc warnings */
                pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
                result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
-                                                 pl_itr, skb->len);
+                                                 pl_itr, skb_len);
                if (result < 0)
                        goto error_pl_descr_check;
                single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
        if (i < i2400m->rx_pl_min)
                i2400m->rx_pl_min = i;
        i2400m->rx_num++;
-       i2400m->rx_size_acc += skb->len;
-       if (skb->len < i2400m->rx_size_min)
-               i2400m->rx_size_min = skb->len;
-       if (skb->len > i2400m->rx_size_max)
-               i2400m->rx_size_max = skb->len;
+       i2400m->rx_size_acc += skb_len;
+       if (skb_len < i2400m->rx_size_min)
+               i2400m->rx_size_min = skb_len;
+       if (skb_len > i2400m->rx_size_max)
+               i2400m->rx_size_max = skb_len;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 error_pl_descr_check:
 error_pl_descr_short:
 error_msg_hdr_check:
-       d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+       d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
                i2400m, skb, skb_len, result);
        return result;
 }
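
The i2400m change is a use-after-free fix: the payload walk can consume the skb, so later references to skb->len (the statistics updates, the exit debug print) touched freed memory. Snapshotting the length up front avoids that, and the switch from size_t to unsigned is why the format strings move from %zu to %u. The shape of the fix, generalized (consume_payloads is an illustrative name, not a real i2400m function):

    static int demo_rx(struct i2400m *i2400m, struct sk_buff *skb)
    {
            unsigned int skb_len = skb->len;        /* capture before consumption */
            int result = consume_payloads(i2400m, skb);    /* may free skb */

            pr_debug("handled %u bytes -> %d\n", skb_len, result);
            return result;
    }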
index cc648b6ae31cef3b3a650ab7414b76fa061173e4..a3d95cca8f0c5be9a32c76d57082da1206cd44d1 100644 (file)
@@ -543,7 +543,7 @@ static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
        if (conf_is_ht40(conf))
                return clockrate * 2;
 
-       return clockrate * 2;
+       return clockrate;
 }
 
 static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
index 9dd9e64c2b0b1a69f11a6d40a5e0e2cbc84d8b22..8fd00a6e512019075e966a038d2c5d09539ccf85 100644 (file)
@@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        clear_bit(STATUS_SCAN_HW, &priv->status);
        clear_bit(STATUS_SCANNING, &priv->status);
        /* inform mac80211 scan aborted */
-       queue_work(priv->workqueue, &priv->scan_completed);
+       queue_work(priv->workqueue, &priv->abort_scan);
 }
 
 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
index 07dbc27964480eebedb67760029dcadab9584f84..e23c4060a0f093e966ca355af1d466880f65b0ee 100644 (file)
@@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return -EINVAL;
 
+       if (test_bit(STATUS_SCANNING, &priv->status)) {
+               IWL_DEBUG_INFO(priv, "scan in progress.\n");
+               return -EINVAL;
+       }
+
        if (mode >= IWL_MAX_FORCE_RESET) {
                IWL_DEBUG_INFO(priv, "invalid reset request.\n");
                return -EINVAL;
index 59a308b02f95fdc077a84a7d12d71db7451b1816..d31661c1ce778259996b5428f9cff95b87f1a3db 100644 (file)
@@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        clear_bit(STATUS_SCANNING, &priv->status);
 
        /* inform mac80211 scan aborted */
-       queue_work(priv->workqueue, &priv->scan_completed);
+       queue_work(priv->workqueue, &priv->abort_scan);
 }
 
 static void iwl3945_bg_restart(struct work_struct *data)
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
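
The buffer_sync rework is about lock ordering: the queued per-CPU sync work itself serializes on buffer_mutex, so flushing it while holding the mutex (the old end_sync() path, reached from end_cpu_work()) could deadlock. The new sequence cancels the work under the mutex, drops the mutex, then flushes; the next hunk removes the now-misplaced flush from end_cpu_work(). Sketched order:

    mutex_lock(&buffer_mutex);
    end_cpu_work();                 /* cancel pending per-CPU work */
    /* ... unregister notifiers ... */
    mutex_unlock(&buffer_mutex);
    flush_scheduled_work();         /* safe: mutex no longer held */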
 
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index b336cd9ee7a114c54d6b7b85d48cacc6f3ed1a6d..f9bda64fcd1b62771880d5d823b53f797cb9c7f0 100644 (file)
@@ -225,26 +225,17 @@ post_sync:
        mutex_unlock(&start_mutex);
 }
 
-int oprofile_set_backtrace(unsigned long val)
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
 {
-       int err = 0;
+       int err = -EBUSY;
 
        mutex_lock(&start_mutex);
-
-       if (oprofile_started) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       if (!oprofile_ops.backtrace) {
-               err = -EINVAL;
-               goto out;
+       if (!oprofile_started) {
+               *addr = val;
+               err = 0;
        }
-
-       oprofile_backtrace_depth = val;
-
-out:
        mutex_unlock(&start_mutex);
+
        return err;
 }
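
oprofile_set_backtrace() generalizes into oprofile_set_ulong(): store any unsigned long tunable unless profiling has started, in which case -EBUSY. The backtrace-specific -EINVAL check moves out to the oprofilefs write handler (see the depth_write hunk below). Caller-side sketch:

    /* val was parsed from userspace by oprofilefs_ulong_from_user() */
    retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
    if (retval)             /* -EBUSY while the profiler is running */
            return retval;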
 
@@ -257,16 +248,9 @@ static int __init oprofile_init(void)
                printk(KERN_INFO "oprofile: using timer interrupt.\n");
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
-                       goto out_arch;
+                       return err;
        }
-       err = oprofilefs_register();
-       if (err)
-               goto out_arch;
-       return 0;
-
-out_arch:
-       oprofile_arch_exit();
-       return err;
+       return oprofilefs_register();
 }
 
 
index 47e12cb4ee8ba7464e4b0c7ef955be71b1caa201..177b73de5e5f158cbb31fe27f8e31b9363c3a255 100644 (file)
@@ -37,7 +37,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
 int oprofile_timer_init(struct oprofile_operations *ops);
 void oprofile_timer_exit(void);
 
-int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
 int oprofile_set_timeout(unsigned long time);
 
 #endif /* OPROF_H */
index bbd7516e0869461c141659004580d509ec5e97a5..ccf099e684a46e90e53bcb17ad72536cdc42a4de 100644 (file)
@@ -79,14 +79,17 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
        if (*offset)
                return -EINVAL;
 
+       if (!oprofile_ops.backtrace)
+               return -EINVAL;
+
        retval = oprofilefs_ulong_from_user(&val, buf, count);
        if (retval)
                return retval;
 
-       retval = oprofile_set_backtrace(val);
-
+       retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
        if (retval)
                return retval;
+
        return count;
 }
 
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644 (file)
index 0000000..9046f7b
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2010 ARM Ltd.
+ *
+ * Perf-events backend for OProfile.
+ */
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+       unsigned long count;
+       unsigned long enabled;
+       unsigned long event;
+       unsigned long unit_mask;
+       unsigned long kernel;
+       unsigned long user;
+       struct perf_event_attr attr;
+};
+
+static int oprofile_perf_enabled;
+static DEFINE_MUTEX(oprofile_perf_mutex);
+
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+                       struct perf_sample_data *data, struct pt_regs *regs)
+{
+       int id;
+       u32 cpu = smp_processor_id();
+
+       for (id = 0; id < num_counters; ++id)
+               if (perf_events[cpu][id] == event)
+                       break;
+
+       if (id != num_counters)
+               oprofile_add_sample(regs, id);
+       else
+               pr_warning("oprofile: ignoring spurious overflow "
+                               "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+       int i;
+       u32 size = sizeof(struct perf_event_attr);
+       struct perf_event_attr *attr;
+
+       for (i = 0; i < num_counters; ++i) {
+               attr = &counter_config[i].attr;
+               memset(attr, 0, size);
+               attr->type              = PERF_TYPE_RAW;
+               attr->size              = size;
+               attr->config            = counter_config[i].event;
+               attr->sample_period     = counter_config[i].count;
+               attr->pinned            = 1;
+       }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+       struct perf_event *pevent;
+
+       if (!counter_config[event].enabled || perf_events[cpu][event])
+               return 0;
+
+       pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+                                                 cpu, NULL,
+                                                 op_overflow_handler);
+
+       if (IS_ERR(pevent))
+               return PTR_ERR(pevent);
+
+       if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+               perf_event_release_kernel(pevent);
+               pr_warning("oprofile: failed to enable event %d "
+                               "on CPU %d\n", event, cpu);
+               return -EBUSY;
+       }
+
+       perf_events[cpu][event] = pevent;
+
+       return 0;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+       struct perf_event *pevent = perf_events[cpu][event];
+
+       if (pevent) {
+               perf_event_release_kernel(pevent);
+               perf_events[cpu][event] = NULL;
+       }
+}
+
+/*
+ * Called by oprofile_perf_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+       int cpu, event, ret = 0;
+
+       for_each_online_cpu(cpu) {
+               for (event = 0; event < num_counters; ++event) {
+                       ret = op_create_counter(cpu, event);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Called by oprofile_perf_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+       int cpu, event;
+
+       for_each_online_cpu(cpu)
+               for (event = 0; event < num_counters; ++event)
+                       op_destroy_counter(cpu, event);
+}
+
+static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_counters; i++) {
+               struct dentry *dir;
+               char buf[4];
+
+               snprintf(buf, sizeof buf, "%d", i);
+               dir = oprofilefs_mkdir(sb, root, buf);
+               oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+               oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+               oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+               oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+               oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+               oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+       }
+
+       return 0;
+}
+
+static int oprofile_perf_setup(void)
+{
+       spin_lock(&oprofilefs_lock);
+       op_perf_setup();
+       spin_unlock(&oprofilefs_lock);
+       return 0;
+}
+
+static int oprofile_perf_start(void)
+{
+       int ret = -EBUSY;
+
+       mutex_lock(&oprofile_perf_mutex);
+       if (!oprofile_perf_enabled) {
+               ret = 0;
+               op_perf_start();
+               oprofile_perf_enabled = 1;
+       }
+       mutex_unlock(&oprofile_perf_mutex);
+       return ret;
+}
+
+static void oprofile_perf_stop(void)
+{
+       mutex_lock(&oprofile_perf_mutex);
+       if (oprofile_perf_enabled)
+               op_perf_stop();
+       oprofile_perf_enabled = 0;
+       mutex_unlock(&oprofile_perf_mutex);
+}
+
+#ifdef CONFIG_PM
+
+static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
+{
+       mutex_lock(&oprofile_perf_mutex);
+       if (oprofile_perf_enabled)
+               op_perf_stop();
+       mutex_unlock(&oprofile_perf_mutex);
+       return 0;
+}
+
+static int oprofile_perf_resume(struct platform_device *dev)
+{
+       mutex_lock(&oprofile_perf_mutex);
+       if (oprofile_perf_enabled && op_perf_start())
+               oprofile_perf_enabled = 0;
+       mutex_unlock(&oprofile_perf_mutex);
+       return 0;
+}
+
+static struct platform_driver oprofile_driver = {
+       .driver         = {
+               .name           = "oprofile-perf",
+       },
+       .resume         = oprofile_perf_resume,
+       .suspend        = oprofile_perf_suspend,
+};
+
+static struct platform_device *oprofile_pdev;
+
+static int __init init_driverfs(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&oprofile_driver);
+       if (ret)
+               return ret;
+
+       oprofile_pdev = platform_device_register_simple(
+                               oprofile_driver.driver.name, 0, NULL, 0);
+       if (IS_ERR(oprofile_pdev)) {
+               ret = PTR_ERR(oprofile_pdev);
+               platform_driver_unregister(&oprofile_driver);
+       }
+
+       return ret;
+}
+
+static void exit_driverfs(void)
+{
+       platform_device_unregister(oprofile_pdev);
+       platform_driver_unregister(&oprofile_driver);
+}
+
+#else
+
+static inline int  init_driverfs(void) { return 0; }
+static inline void exit_driverfs(void) { }
+
+#endif /* CONFIG_PM */
+
+void oprofile_perf_exit(void)
+{
+       int cpu, id;
+       struct perf_event *event;
+
+       for_each_possible_cpu(cpu) {
+               for (id = 0; id < num_counters; ++id) {
+                       event = perf_events[cpu][id];
+                       if (event)
+                               perf_event_release_kernel(event);
+               }
+
+               kfree(perf_events[cpu]);
+       }
+
+       kfree(counter_config);
+       exit_driverfs();
+}
+
+int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+       int cpu, ret = 0;
+
+       ret = init_driverfs();
+       if (ret)
+               return ret;
+
+       memset(&perf_events, 0, sizeof(perf_events));
+
+       num_counters = perf_num_counters();
+       if (num_counters <= 0) {
+               pr_info("oprofile: no performance counters\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       counter_config = kcalloc(num_counters,
+                       sizeof(struct op_counter_config), GFP_KERNEL);
+
+       if (!counter_config) {
+               pr_info("oprofile: failed to allocate %d "
+                               "counters\n", num_counters);
+               ret = -ENOMEM;
+               num_counters = 0;
+               goto out;
+       }
+
+       for_each_possible_cpu(cpu) {
+               perf_events[cpu] = kcalloc(num_counters,
+                               sizeof(struct perf_event *), GFP_KERNEL);
+               if (!perf_events[cpu]) {
+                       pr_info("oprofile: failed to allocate %d perf events "
+                                       "for cpu %d\n", num_counters, cpu);
+                       ret = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       ops->create_files       = oprofile_perf_create_files;
+       ops->setup              = oprofile_perf_setup;
+       ops->start              = oprofile_perf_start;
+       ops->stop               = oprofile_perf_stop;
+       ops->shutdown           = oprofile_perf_stop;
+       ops->cpu_type           = op_name_from_perf_id();
+
+       if (!ops->cpu_type)
+               ret = -ENODEV;
+       else
+               pr_info("oprofile: using %s\n", ops->cpu_type);
+
+out:
+       if (ret)
+               oprofile_perf_exit();
+
+       return ret;
+}
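
With this shared backend in place, an architecture's OProfile glue can shrink to a forwarding shim. A plausible sketch of that arch side, under stated assumptions (oprofile_arch_init/oprofile_arch_exit are the established arch entry points; my_arch_backtrace and the arch providing op_name_from_perf_id() are assumptions):

    int __init oprofile_arch_init(struct oprofile_operations *ops)
    {
            ops->backtrace = my_arch_backtrace;     /* optional hook, illustrative */
            return oprofile_perf_init(ops);
    }

    void oprofile_arch_exit(void)
    {
            oprofile_perf_exit();
    }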
index 2766a6d3c2e9c8fe2f7ab2a2d1f3457275693f80..1944621930d96fc51a8f0497037b4dac74e0329b 100644 (file)
@@ -91,16 +91,20 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
 
 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-       unsigned long *value = file->private_data;
+       unsigned long value;
        int retval;
 
        if (*offset)
                return -EINVAL;
 
-       retval = oprofilefs_ulong_from_user(value, buf, count);
+       retval = oprofilefs_ulong_from_user(&value, buf, count);
+       if (retval)
+               return retval;
 
+       retval = oprofile_set_ulong(file->private_data, value);
        if (retval)
                return retval;
+
        return count;
 }
 
@@ -126,50 +130,41 @@ static const struct file_operations ulong_ro_fops = {
 };
 
 
-static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+static int __oprofilefs_create_file(struct super_block *sb,
        struct dentry *root, char const *name, const struct file_operations *fops,
-       int perm)
+       int perm, void *priv)
 {
        struct dentry *dentry;
        struct inode *inode;
 
        dentry = d_alloc_name(root, name);
        if (!dentry)
-               return NULL;
+               return -ENOMEM;
        inode = oprofilefs_get_inode(sb, S_IFREG | perm);
        if (!inode) {
                dput(dentry);
-               return NULL;
+               return -ENOMEM;
        }
        inode->i_fop = fops;
        d_add(dentry, inode);
-       return dentry;
+       dentry->d_inode->i_private = priv;
+       return 0;
 }
 
 
 int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
        char const *name, unsigned long *val)
 {
-       struct dentry *d = __oprofilefs_create_file(sb, root, name,
-                                                    &ulong_fops, 0644);
-       if (!d)
-               return -EFAULT;
-
-       d->d_inode->i_private = val;
-       return 0;
+       return __oprofilefs_create_file(sb, root, name,
+                                       &ulong_fops, 0644, val);
 }
 
 
 int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
        char const *name, unsigned long *val)
 {
-       struct dentry *d = __oprofilefs_create_file(sb, root, name,
-                                                    &ulong_ro_fops, 0444);
-       if (!d)
-               return -EFAULT;
-
-       d->d_inode->i_private = val;
-       return 0;
+       return __oprofilefs_create_file(sb, root, name,
+                                       &ulong_ro_fops, 0444, val);
 }
 
 
@@ -189,31 +184,22 @@ static const struct file_operations atomic_ro_fops = {
 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
        char const *name, atomic_t *val)
 {
-       struct dentry *d = __oprofilefs_create_file(sb, root, name,
-                                                    &atomic_ro_fops, 0444);
-       if (!d)
-               return -EFAULT;
-
-       d->d_inode->i_private = val;
-       return 0;
+       return __oprofilefs_create_file(sb, root, name,
+                                       &atomic_ro_fops, 0444, val);
 }
 
 
 int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
        char const *name, const struct file_operations *fops)
 {
-       if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
-               return -EFAULT;
-       return 0;
+       return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
 }
 
 
 int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
        char const *name, const struct file_operations *fops, int perm)
 {
-       if (!__oprofilefs_create_file(sb, root, name, fops, perm))
-               return -EFAULT;
-       return 0;
+       return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
 }
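
Folding the i_private assignment into __oprofilefs_create_file() removes both the repeated boilerplate and the misleading -EFAULT on allocation failure (-ENOMEM is the honest errno). Creating a control file is now a one-liner; sketched (dir and config are illustrative):

    /* expose an unsigned long, read-write, under the oprofilefs mount */
    err = oprofilefs_create_ulong(sb, dir, "enabled", &config.enabled);
    if (err)
            return err;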
 
 
index dffa5d4fb2986f89b6ab26882bd9de63df81e2e1..a2d9d1e59260eab17097e0a12f16908eed26c552 100644 (file)
@@ -306,7 +306,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
        spin_lock_init(&tmp->pardevice_lock);
        tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
        tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
-       init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
+       sema_init(&tmp->ieee1284.irq, 0);
        tmp->spintime = parport_default_spintime;
        atomic_set (&tmp->ref_count, 1);
        INIT_LIST_HEAD(&tmp->full_list);
index c3ceebb5be84168ae71b845bdc52b3c9a6b557b3..4789f8e8bf7ad91cae0b0a1043349ac96ea5fa64 100644 (file)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
 
+/* page table handling */
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+       return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+       return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+       return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
 }
 
 
-static inline int width_to_agaw(int width);
-
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
        unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
-       return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-       return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-       return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
 {
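
The relocated helpers encode a 9-bits-per-level page-table walk. Working the arithmetic once (these values follow directly from LEVEL_STRIDE == 9; the demo function is illustrative):

    static void level_math_demo(void)
    {
            int level = agaw_to_level(2);               /* 2 + 2 = 4 levels        */
            int width = agaw_to_width(2);               /* 30 + 2*9 = 48 addr bits */
            int idx1  = pfn_level_offset(0x12345, 1);   /* 0x12345 & 0x1ff = 0x145 */
            int idx2  = pfn_level_offset(0x12345, 2);   /* (0x12345>>9) & 0x1ff = 0x91 */

            (void)level; (void)width; (void)idx1; (void)idx2;
    }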
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
 
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK   (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE   (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M     (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M     (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT  (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT  (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+       unsigned short ggc;
+
+       if (pci_read_config_word(dev, GGC, &ggc))
+               return;
+
+       if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               dmar_map_gfx = 0;
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
 /* On Tylersburg chipsets, some BIOSes have been known to enable the
    ISOCH DMAR unit for the Azalia sound device, but not give it any
    TLB entries, which causes it to deadlock. Check for that.  We do
index ce6a3666b3d9878f70be6fb65f3e1272fdf6b6b2..553d8ee55c1c4aa3c9f6567867642d3505cf89df 100644 (file)
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
  * the VF BAR size multiplied by the number of VFs.  The alignment
  * is just the VF BAR size.
  */
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
        struct resource tmp;
        enum pci_bar_type type;
index 7754a678ab15cc77445d396aa59409658bf8006f..6beb11b617a92973f7a32cc47a7343a759218d6e 100644 (file)
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                                enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+                                                   int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
                                         struct resource *res)
 {
 #ifdef CONFIG_PCI_IOV
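
The return type widens because SR-IOV VF BAR alignments can, in principle, overflow an int: resource_size_t is phys_addr_t-sized, so on CONFIG_PHYS_ADDR_T_64BIT kernels values of 4 GiB and up survive the trip. Caller-side sketch (demo_alignment is illustrative):

    static resource_size_t demo_alignment(struct pci_dev *dev, int resno)
    {
            /* an int return here would silently truncate large alignments */
            return pci_sriov_resource_alignment(dev, resno);
    }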
index 89ed181cd90cd9cd68506a212eb423ba60f9c8e2..857ae01734a66156c8abb92335be91cc674964a0 100644 (file)
@@ -162,6 +162,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1,       quirk_isa_d
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_2,       quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_3,       quirk_isa_dma_hangs);
 
+/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+       u32 pmbase;
+       u16 pm1a;
+
+       pci_read_config_dword(dev, 0x40, &pmbase);
+       pmbase = pmbase & 0xff80;
+       pm1a = inw(pmbase);
+
+       if (pm1a & 0x10) {
+               dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+               outw(0x10, pmbase);
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
 /*
  *     Chipsets where PCI->PCI transfers vanish or hang
  */
index 54aa1c238cb34a5966a1d8ff6621835846514d87..9ba4dade69a4d67cd5b0efc1e4c55a70dbf349c1 100644 (file)
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
 
        if (!(c->state & CONFIG_LOCKED)) {
-               dev_dbg(&s->dev, "Configuration isn't locked\n");
+               dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
                mutex_unlock(&s->ops_mutex);
                return -EACCES;
        }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
        s->win[w].card_start = offset;
        ret = s->ops->set_mem_map(s, &s->win[w]);
        if (ret)
-               dev_warn(&s->dev, "failed to set_mem_map\n");
+               dev_warn(&p_dev->dev, "failed to set_mem_map\n");
        mutex_unlock(&s->ops_mutex);
        return ret;
 } /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "No card present\n");
+               dev_dbg(&p_dev->dev, "No card present\n");
                ret = -ENODEV;
                goto unlock;
        }
        if (!(c->state & CONFIG_LOCKED)) {
-               dev_dbg(&s->dev, "Configuration isn't locked\n");
+               dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
                ret = -EACCES;
                goto unlock;
        }
 
        if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
-               dev_dbg(&s->dev,
+               dev_dbg(&p_dev->dev,
                        "changing Vcc or IRQ is not allowed at this time\n");
                ret = -EINVAL;
                goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
        if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
            (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
                if (mod->Vpp1 != mod->Vpp2) {
-                       dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
+                       dev_dbg(&p_dev->dev,
+                               "Vpp1 and Vpp2 must be the same\n");
                        ret = -EINVAL;
                        goto unlock;
                }
                s->socket.Vpp = mod->Vpp1;
                if (s->ops->set_socket(s, &s->socket)) {
-                       dev_printk(KERN_WARNING, &s->dev,
+                       dev_printk(KERN_WARNING, &p_dev->dev,
                                   "Unable to set VPP\n");
                        ret = -EIO;
                        goto unlock;
                }
        } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
                   (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
-               dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+               dev_dbg(&p_dev->dev,
+                       "changing Vcc is not allowed at this time\n");
                ret = -EINVAL;
                goto unlock;
        }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
        win = &s->win[w];
 
        if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
-               dev_dbg(&s->dev, "not releasing unknown window\n");
+               dev_dbg(&p_dev->dev, "not releasing unknown window\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
                return -ENODEV;
 
        if (req->IntType & INT_CARDBUS) {
-               dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
+               dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
                return -EINVAL;
        }
 
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
        if (c->state & CONFIG_LOCKED) {
                mutex_unlock(&s->ops_mutex);
-               dev_dbg(&s->dev, "Configuration is locked\n");
+               dev_dbg(&p_dev->dev, "Configuration is locked\n");
                return -EACCES;
        }
 
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
        s->socket.Vpp = req->Vpp;
        if (s->ops->set_socket(s, &s->socket)) {
                mutex_unlock(&s->ops_mutex);
-               dev_printk(KERN_WARNING, &s->dev,
+               dev_printk(KERN_WARNING, &p_dev->dev,
                           "Unable to set socket state\n");
                return -EINVAL;
        }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        int ret = -EINVAL;
 
        mutex_lock(&s->ops_mutex);
-       dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]);
+       dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
+               &c->io[0], &c->io[1]);
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "pcmcia_request_io: No card present\n");
+               dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
                goto out;
        }
 
        if (c->state & CONFIG_LOCKED) {
-               dev_dbg(&s->dev, "Configuration is locked\n");
+               dev_dbg(&p_dev->dev, "Configuration is locked\n");
                goto out;
        }
        if (c->state & CONFIG_IO_REQ) {
-               dev_dbg(&s->dev, "IO already configured\n");
+               dev_dbg(&p_dev->dev, "IO already configured\n");
                goto out;
        }
 
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        if (c->io[1].end) {
                ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
                if (ret) {
+                       struct resource tmp = c->io[0];
+                       /* release the previously allocated resource */
                        release_io_space(s, &c->io[0]);
+                       /* but preserve the settings, for they worked... */
+                       c->io[0].end = resource_size(&tmp);
+                       c->io[0].start = tmp.start;
+                       c->io[0].flags = tmp.flags;
                        goto out;
                }
        } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        c->state |= CONFIG_IO_REQ;
        p_dev->_io = 1;
 
-       dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR",
+       dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
                &c->io[0], &c->io[1]);
 out:
        mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        int w;
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "No card present\n");
+               dev_dbg(&p_dev->dev, "No card present\n");
                return -ENODEV;
        }
 
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                req->Size = s->map_size;
        align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
        if (req->Size & (s->map_size-1)) {
-               dev_dbg(&s->dev, "invalid map size\n");
+               dev_dbg(&p_dev->dev, "invalid map size\n");
                return -EINVAL;
        }
        if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
            (req->Base & (align-1))) {
-               dev_dbg(&s->dev, "invalid base address\n");
+               dev_dbg(&p_dev->dev, "invalid base address\n");
                return -EINVAL;
        }
        if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                if (!(s->state & SOCKET_WIN_REQ(w)))
                        break;
        if (w == MAX_WIN) {
-               dev_dbg(&s->dev, "all windows are used already\n");
+               dev_dbg(&p_dev->dev, "all windows are used already\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
                                                0, s);
                if (!win->res) {
-                       dev_dbg(&s->dev, "allocating mem region failed\n");
+                       dev_dbg(&p_dev->dev, "allocating mem region failed\n");
                        mutex_unlock(&s->ops_mutex);
                        return -EINVAL;
                }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        win->card_start = 0;
 
        if (s->ops->set_mem_map(s, win) != 0) {
-               dev_dbg(&s->dev, "failed to set memory mapping\n");
+               dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
                mutex_unlock(&s->ops_mutex);
                return -EIO;
        }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        if (win->res)
                request_resource(&iomem_resource, res);
 
-       dev_dbg(&s->dev, "request_window results in %pR\n", res);
+       dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
 
        mutex_unlock(&s->ops_mutex);
        *wh = res;
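Editor's note: the recurring change in this pcmcia file swaps the socket device (&s->dev) for the requesting card function (&p_dev->dev) in the log calls, so messages name the device that made the request. The %pR specifier used above is the kernel's printk extension for struct resource; a minimal sketch of the resulting output, values illustrative only:

        dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
        /* might print, e.g.:
         *   pcmcia 0.0: request_window results in [mem 0xd0000000-0xd0000fff]
         */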
index b8a869af0f4410dd25a3cb0b4b5ace093ea5d6a5..deef6656ab7b8e2015e382ed48b85e067c7503a5 100644 (file)
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
        if (!pci_resource_start(dev, 0)) {
                dev_warn(&dev->dev, "refusing to load the driver as the "
                        "io_base is NULL.\n");
-               goto err_out_free_mem;
+               goto err_out_disable;
        }
 
        dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
index 9024480a82288ec071e26008f616fe2d9556c18b..c44a5e8b8b82da9d06706d9cd3a3ec0fcb2b883c 100644 (file)
@@ -51,7 +51,6 @@
  * TODO:
  *   - handle CPU hotplug
  *   - provide turbo enable/disable api
- *   - make sure we can write turbo enable/disable reg based on MISC_EN
  *
  * Related documents:
  *   - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
 #define THM_TC2                0xac
 #define THM_DTV                0xb0
 #define THM_ITV                0xd8
-#define   ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */
+#define   ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
 #define   ITV_ME_SEQNO_SHIFT (16)
 #define   ITV_MCH_TEMP_MASK 0x0000ff00
 #define   ITV_MCH_TEMP_SHIFT (8)
@@ -325,6 +324,7 @@ struct ips_driver {
        bool gpu_preferred;
        bool poll_turbo_status;
        bool second_cpu;
+       bool turbo_toggle_allowed;
        struct ips_mcp_limits *limits;
 
        /* Optional MCH interfaces for if i915 is in use */
@@ -415,7 +415,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
        new_limit = cur_limit - 8; /* 1W decrease */
 
        /* Clamp to SKU TDP limit */
-       if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK))
+       if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
                new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
 
        thm_writew(THM_MPCPC, (new_limit * 10) / 8);
@@ -461,7 +461,8 @@ static void ips_enable_cpu_turbo(struct ips_driver *ips)
        if (ips->__cpu_turbo_on)
                return;
 
-       on_each_cpu(do_enable_cpu_turbo, ips, 1);
+       if (ips->turbo_toggle_allowed)
+               on_each_cpu(do_enable_cpu_turbo, ips, 1);
 
        ips->__cpu_turbo_on = true;
 }
@@ -498,7 +499,8 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips)
        if (!ips->__cpu_turbo_on)
                return;
 
-       on_each_cpu(do_disable_cpu_turbo, ips, 1);
+       if (ips->turbo_toggle_allowed)
+               on_each_cpu(do_disable_cpu_turbo, ips, 1);
 
        ips->__cpu_turbo_on = false;
 }
@@ -598,17 +600,29 @@ static bool mcp_exceeded(struct ips_driver *ips)
 {
        unsigned long flags;
        bool ret = false;
+       u32 temp_limit;
+       u32 avg_power;
+       const char *msg = "MCP limit exceeded: ";
 
        spin_lock_irqsave(&ips->turbo_status_lock, flags);
-       if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100))
-               ret = true;
-       if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit)
+
+       temp_limit = ips->mcp_temp_limit * 100;
+       if (ips->mcp_avg_temp > temp_limit) {
+               dev_info(&ips->dev->dev,
+                       "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
+                       temp_limit);
                ret = true;
-       spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
+       }
 
-       if (ret)
+       avg_power = ips->cpu_avg_power + ips->mch_avg_power;
+       if (avg_power > ips->mcp_power_limit) {
                dev_info(&ips->dev->dev,
-                        "MCP power or thermal limit exceeded\n");
+                       "%sAvg power %u, limit %u\n", msg, avg_power,
+                       ips->mcp_power_limit);
+               ret = true;
+       }
+
+       spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
 
        return ret;
 }
@@ -662,6 +676,27 @@ static bool mch_exceeded(struct ips_driver *ips)
        return ret;
 }
 
+/**
+ * verify_limits - verify BIOS provided limits
+ * @ips: IPS structure
+ *
+ * BIOS can optionally provide non-default limits for power and temp.  Check
+ * them here and use the defaults if the BIOS values are not provided or
+ * are otherwise unusable.
+ */
+static void verify_limits(struct ips_driver *ips)
+{
+       if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
+           ips->mcp_power_limit > 35000)
+               ips->mcp_power_limit = ips->limits->mcp_power_limit;
+
+       if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
+           ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
+           ips->mcp_temp_limit > 150)
+               ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
+                                         ips->limits->mch_temp_limit);
+}
+
 /**
  * update_turbo_limits - get various limits & settings from regs
  * @ips: IPS driver struct
@@ -680,12 +715,21 @@ static void update_turbo_limits(struct ips_driver *ips)
        u32 hts = thm_readl(THM_HTS);
 
        ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
-       ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
+       /*
+        * Disable turbo for now, until we can figure out why the power figures
+        * are wrong
+        */
+       ips->cpu_turbo_enabled = false;
+
+       if (ips->gpu_busy)
+               ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
+
        ips->core_power_limit = thm_readw(THM_MPCPC);
        ips->mch_power_limit = thm_readw(THM_MMGPC);
        ips->mcp_temp_limit = thm_readw(THM_PTL);
        ips->mcp_power_limit = thm_readw(THM_MPPC);
 
+       verify_limits(ips);
        /* Ignore BIOS CPU vs GPU pref */
 }
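Editor's note: verify_limits() above clamps BIOS-supplied values into a sane window: anything below the platform default or above a hard ceiling (35000, presumably milliwatts, and 150 degrees C in the driver) falls back to the default. A minimal user-space sketch of the same clamping rule, names hypothetical:

    /* fall back to the platform default when the BIOS value is
     * below the default or above the hard ceiling */
    static unsigned int clamp_limit(unsigned int bios_val,
                                    unsigned int platform_default,
                                    unsigned int ceiling)
    {
            if (bios_val < platform_default || bios_val > ceiling)
                    return platform_default;
            return bios_val;
    }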
 
@@ -858,7 +902,7 @@ static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
        ret = (ret * 1000) / 65535;
        *last = val;
 
-       return ret;
+       return 0;
 }
 
 static const u16 temp_decay_factor = 2;
@@ -940,7 +984,6 @@ static int ips_monitor(void *data)
                kfree(mch_samples);
                kfree(cpu_samples);
                kfree(mchp_samples);
-               kthread_stop(ips->adjust);
                return -ENOMEM;
        }
 
@@ -948,7 +991,7 @@ static int ips_monitor(void *data)
                ITV_ME_SEQNO_SHIFT;
        seqno_timestamp = get_jiffies_64();
 
-       old_cpu_power = thm_readl(THM_CEC) / 65535;
+       old_cpu_power = thm_readl(THM_CEC);
        schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
 
        /* Collect an initial average */
@@ -1150,11 +1193,18 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
                                STS_GPL_SHIFT;
                        /* ignore EC CPU vs GPU pref */
                        ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
-                       ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
+                       /*
+                        * Disable turbo for now, until we can figure
+                        * out why the power figures are wrong
+                        */
+                       ips->cpu_turbo_enabled = false;
+                       if (ips->gpu_busy)
+                               ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
                        ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
                                STS_PTL_SHIFT;
                        ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
                                STS_PPL_SHIFT;
+                       verify_limits(ips);
                        spin_unlock(&ips->turbo_status_lock);
 
                        thm_writeb(THM_SEC, SEC_ACK);
@@ -1333,8 +1383,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
         * turbo manually or we'll get an illegal MSR access, even though
         * turbo will still be available.
         */
-       if (!(misc_en & IA32_MISC_TURBO_EN))
-               ; /* add turbo MSR write allowed flag if necessary */
+       if (misc_en & IA32_MISC_TURBO_EN)
+               ips->turbo_toggle_allowed = true;
+       else
+               ips->turbo_toggle_allowed = false;
 
        if (strstr(boot_cpu_data.x86_model_id, "CPU       M"))
                limits = &ips_sv_limits;
@@ -1351,9 +1403,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
        tdp = turbo_power & TURBO_TDP_MASK;
 
        /* Sanity check TDP against CPU */
-       if (limits->mcp_power_limit != (tdp / 8) * 1000) {
-               dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n",
-                        tdp / 8, limits->mcp_power_limit / 1000);
+       if (limits->core_power_limit != (tdp / 8) * 1000) {
+               dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+                        tdp / 8, limits->core_power_limit / 1000);
+               limits->core_power_limit = (tdp / 8) * 1000;
        }
 
 out:
@@ -1390,7 +1443,7 @@ static bool ips_get_i915_syms(struct ips_driver *ips)
        return true;
 
 out_put_busy:
-       symbol_put(i915_gpu_turbo_disable);
+       symbol_put(i915_gpu_busy);
 out_put_lower:
        symbol_put(i915_gpu_lower);
 out_put_raise:
@@ -1532,22 +1585,27 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        /* Save turbo limits & ratios */
        rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
 
-       ips_enable_cpu_turbo(ips);
-       ips->cpu_turbo_enabled = true;
+       ips_disable_cpu_turbo(ips);
+       ips->cpu_turbo_enabled = false;
 
-       /* Set up the work queue and monitor/adjust threads */
-       ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
-       if (IS_ERR(ips->monitor)) {
+       /* Create thermal adjust thread */
+       ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
+       if (IS_ERR(ips->adjust)) {
                dev_err(&dev->dev,
-                       "failed to create thermal monitor thread, aborting\n");
+                       "failed to create thermal adjust thread, aborting\n");
                ret = -ENOMEM;
                goto error_free_irq;
+
        }
 
-       ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
-       if (IS_ERR(ips->adjust)) {
+       /*
+        * Set up the work queue and monitor thread. The monitor thread
+        * will wake up the ips_adjust thread.
+        */
+       ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
+       if (IS_ERR(ips->monitor)) {
                dev_err(&dev->dev,
-                       "failed to create thermal adjust thread, aborting\n");
+                       "failed to create thermal monitor thread, aborting\n");
                ret = -ENOMEM;
                goto error_thread_cleanup;
        }
@@ -1566,7 +1624,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        return ret;
 
 error_thread_cleanup:
-       kthread_stop(ips->monitor);
+       kthread_stop(ips->adjust);
 error_free_irq:
        free_irq(ips->dev->irq, ips);
 error_unmap:
index e35ed128bdef439313a107106f36c47e03dc5b7e..2d61186ad5a2e96708fcec4beb0a8402eb2bc09f 100644 (file)
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
        TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
 };
 
-typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
 
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        };
 
 #define TPACPI_HOTKEY_MAP_SIZE         sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_t[0])
+#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_entry_t)
 
        int res, i;
        int status;
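Editor's note: the two thinkpad-acpi hunks above fix a sizeof pitfall. With `typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]`, the expression sizeof(tpacpi_keymap_t[0]) parses as the size of a zero-length array of whole keymaps, which is 0 under GCC's zero-length-array extension, not the size of one entry. A standalone sketch with shortened, hypothetical names:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t keymap_t[64];    /* array typedef, like tpacpi_keymap_t */
    typedef uint16_t keymap_entry_t;  /* element typedef, as in the fix */

    int main(void)
    {
            /* keymap_t[0] is a type: "array of 0 keymap_t", so its
             * size is 0 with GCC's extension -- not 2 */
            printf("sizeof(keymap_t[0])    = %zu\n", sizeof(keymap_t[0]));
            printf("sizeof(keymap_entry_t) = %zu\n", sizeof(keymap_entry_t));
            return 0;
    }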
index 936bae560fa1f730255bde8a94c9493c953efdfd..dc628cb2e762803b6f3374d33a99feae5b08640c 100644 (file)
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
                empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
                now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
                avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+               break;
        case SOURCE_VOLTAGE:
                full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
                empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
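Editor's note: the one-line fix above adds the break that was missing between the energy and voltage arms; without it, SOURCE_ENERGY fell through and had its property selections silently overwritten. A minimal standalone illustration:

    #include <stdio.h>

    enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

    int main(void)
    {
            enum source s = SOURCE_ENERGY;
            const char *prop = "?";

            switch (s) {
            case SOURCE_ENERGY:
                    prop = "energy";
                    break;          /* without this, "voltage" wins below */
            case SOURCE_VOLTAGE:
                    prop = "voltage";
                    break;
            }
            printf("%s\n", prop);   /* prints "energy" only with the break */
            return 0;
    }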
index c61ffec2ff106ca7aa153dac55cebf5a77b495b1..2a10cd361181292f9d6bbef7ef38cda9772c839b 100644 (file)
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
 {
        u32 data[3];
        u8 *p = (u8 *)&data[1];
-       int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY,
-                               IPCMSG_BATTERY, NULL, 0, data, 3);
+       int err = intel_scu_ipc_command(IPCMSG_BATTERY,
+                               IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
 
        prop->capacity = data[0];
        prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
 
 static int pmic_scu_ipc_set_charger(int charger)
 {
-       return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY);
+       return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
 }
 
 /**
index 7d149a8d8d9b6cb7fe92f73b71536a723cbf6249..2ce2eb71d0f5be88e03495ea1de06e16902a461a 100644 (file)
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
        struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
        int ret = -EINVAL;
 
-       if (info->vol_table && (index < (2 << info->vol_nbits))) {
+       if (info->vol_table && (index < (1 << info->vol_nbits))) {
                ret = info->vol_table[index];
                if (info->slope_double)
                        ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
                max_uV = max_uV >> 1;
        }
        if (info->vol_table) {
-               for (i = 0; i < (2 << info->vol_nbits); i++) {
+               for (i = 0; i < (1 << info->vol_nbits); i++) {
                        if (!info->vol_table[i])
                                break;
                        if ((min_uV <= info->vol_table[i])
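Editor's note: an n-bit selector field can index 1 << n table entries; the previous 2 << n in both hunks iterated (and bounds-checked) twice the real table size. A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            int nbits = 3;
            /* 1 << 3 == 8 entries addressable by a 3-bit field;
             * 2 << 3 == 16 would walk past the end of an 8-entry table */
            printf("1 << %d = %d, 2 << %d = %d\n",
                   nbits, 1 << nbits, nbits, 2 << nbits);
            return 0;
    }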
index 11790990277a3dbd9a96ca2ba398f505480f5e19..b349266a43de63c150b92ab36c8b49274958392b 100644 (file)
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
                                "%s: failed to register regulator %s err %d\n",
                                __func__, ab3100_regulator_desc[i].name,
                                err);
-                       i--;
                        /* remove the already registered regulators */
-                       while (i > 0) {
+                       while (--i >= 0)
                                regulator_unregister(ab3100_regulators[i].rdev);
-                               i--;
-                       }
                        return err;
                }
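Editor's note: the rewritten unwind loop here (and the matching ab8500 hunk below) fixes the error path so that the first registered regulator, index 0, is also unregistered; the old "i--; while (i > 0)" form stopped one short. A standalone sketch:

    #include <stdio.h>

    int main(void)
    {
            int i, fail_at = 3;

            for (i = 0; i < 5; i++) {
                    if (i == fail_at)
                            break;          /* registration i failed */
                    printf("register %d\n", i);
            }
            /* --i >= 0 visits fail_at-1 down to and including 0;
             * "i--; while (i > 0)" would have skipped index 0 */
            while (--i >= 0)
                    printf("unregister %d\n", i);
            return 0;
    }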
 
index dc3f1a491675abe371a483f862b01b60d1489a47..28c7ae67cec9ea1d573c94b30d8732f7770340b0 100644 (file)
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
        if (info->fixed_uV)
                return info->fixed_uV;
 
-       if (selector > info->voltages_len)
+       if (selector >= info->voltages_len)
                return -EINVAL;
 
        return info->supported_voltages[selector];
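Editor's note: valid selectors run from 0 to voltages_len - 1, so the rejection test must be >=; the old > let selector == voltages_len through and read one element past the table. Standalone sketch, table values illustrative:

    #include <stdio.h>

    int main(void)
    {
            int table[4] = { 1100, 1200, 1300, 1800 };
            int len = 4, selector = 4;

            /* ">" would accept selector == len and read table[4] */
            if (selector >= len) {
                    fprintf(stderr, "selector %d out of range\n", selector);
                    return 1;
            }
            printf("%d\n", table[selector]);
            return 0;
    }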
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
        struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
-       struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev);
+       struct ab8500_platform_data *pdata;
        int i, err;
 
        if (!ab8500) {
                dev_err(&pdev->dev, "null mfd parent\n");
                return -EINVAL;
        }
+       pdata = dev_get_platdata(ab8500->dev);
 
        /* register all regulators */
        for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
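Editor's note: the probe fix above moves the dev_get_platdata() call to after the NULL check on ab8500; the old order dereferenced the pointer in the initializer before validating it. The pattern, as a standalone sketch with adapted names:

    #include <stdio.h>

    struct parent { const char *platdata; };

    static int probe(struct parent *parent)
    {
            const char *pdata;

            if (!parent) {                    /* validate first */
                    fprintf(stderr, "null mfd parent\n");
                    return -1;
            }
            pdata = parent->platdata;         /* then dereference */
            printf("pdata: %s\n", pdata ? pdata : "(none)");
            return 0;
    }

    int main(void)
    {
            struct parent p = { "board data" };
            probe(&p);
            probe(NULL);
            return 0;
    }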
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
                                        info->desc.name);
                        /* when we fail, un-register all earlier regulators */
-                       i--;
-                       while (i > 0) {
+                       while (--i >= 0) {
                                info = &ab8500_regulator_info[i];
                                regulator_unregister(info->regulator);
-                               i--;
                        }
                        return err;
                }
index d59d2f2314afc999b8c9b6b9b9eb1c68df02b08f..a4be41614eebd41fa5733c09a7e9645c64c72c59 100644 (file)
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
        unsigned int current_level;
        unsigned int current_mask;
        unsigned int current_offset;
-       struct regulator_dev rdev;
+       struct regulator_dev *rdev;
 };
 
 static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
 static int __devinit ad5398_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
 {
-       struct regulator_dev *rdev;
        struct regulator_init_data *init_data = client->dev.platform_data;
        struct ad5398_chip_info *chip;
        const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
        chip->current_offset = df->current_offset;
        chip->current_mask = (chip->current_level - 1) << chip->current_offset;
 
-       rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip);
-       if (IS_ERR(rdev)) {
-               ret = PTR_ERR(rdev);
+       chip->rdev = regulator_register(&ad5398_reg, &client->dev,
+                                       init_data, chip);
+       if (IS_ERR(chip->rdev)) {
+               ret = PTR_ERR(chip->rdev);
                dev_err(&client->dev, "failed to register %s %s\n",
                        id->name, ad5398_reg.name);
                goto err;
@@ -254,9 +254,8 @@ static int __devexit ad5398_remove(struct i2c_client *client)
 {
        struct ad5398_chip_info *chip = i2c_get_clientdata(client);
 
-       regulator_unregister(&chip->rdev);
+       regulator_unregister(chip->rdev);
        kfree(chip);
-       i2c_set_clientdata(client, NULL);
 
        return 0;
 }
index 422a709d271d51d593899db82b0b930cf93f2847..cc8b337b9119de5e955aabe1935ad931a895c71a 100644 (file)
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
            constraints->min_uA != constraints->max_uA) {
                ret = _regulator_get_current_limit(rdev);
                if (ret > 0)
-                       count += sprintf(buf + count, "at %d uA ", ret / 1000);
+                       count += sprintf(buf + count, "at %d mA ", ret / 1000);
        }
 
        if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        dev_set_name(&rdev->dev, "regulator.%d",
                     atomic_inc_return(&regulator_no) - 1);
        ret = device_register(&rdev->dev);
-       if (ret != 0)
+       if (ret != 0) {
+               put_device(&rdev->dev);
                goto clean;
+       }
 
        dev_set_drvdata(&rdev->dev, rdev);
 
index e49d2bd393f27cdd105f0ba9a16e7a9c96b99c62..b8cc6389a541a0e3cbc0cc4a60231c166cd2e2e1 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
        mutex_init(&pmic->mtx);
 
        for (i = 0; i < 3; i++) {
-               pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev,
+               pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
                                                init_data, pmic);
                if (IS_ERR(pmic->rdev[i])) {
                        dev_err(&i2c->dev, "failed to register %s\n", id->name);
@@ -191,8 +191,6 @@ static int __devexit isl6271a_remove(struct i2c_client *i2c)
        struct isl_pmic *pmic = i2c_get_clientdata(i2c);
        int i;
 
-       i2c_set_clientdata(i2c, NULL);
-
        for (i = 0; i < 3; i++)
                regulator_unregister(pmic->rdev[i]);
 
index 8867c2710a6d07319d9ad1b6d926922b280f30dc..559cfa271a4452389577be87543ce6116bfc0ebf 100644 (file)
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
        if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
                return -EINVAL;
 
-       if (min_uV >= 3000000)
-               selector = 3;
-       if (min_uV < 3000000)
-               selector = 2;
-       if (min_uV < 2500000)
-               selector = 1;
        if (min_uV < 1800000)
                selector = 0;
+       else if (min_uV < 2500000)
+               selector = 1;
+       else if (min_uV < 3000000)
+               selector = 2;
+       else if (min_uV >= 3000000)
+               selector = 3;
 
        if (max1586_v6_calc_voltage(selector) > max_uV)
                return -EINVAL;
index 4520ace3f7e707f82ccbf0fb068df921dfc6c2df..6b60a9c0366b3c5236fa7019844274c8b1155b3e 100644 (file)
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
                /* set external clock frequency */
                info->extclk_freq = pdata->extclk_freq;
                max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
-                                info->extclk_freq);
+                                info->extclk_freq << 6);
        }
 
        if (pdata->ramp_timing) {
index ab67298799f95a573f7a901199ac966cdc79847b..a1baf1fbe00472e71845591e2d427715a720a226 100644 (file)
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
        if (!max8998)
                return -ENOMEM;
 
-       size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1);
+       size = sizeof(struct regulator_dev *) * pdata->num_regulators;
        max8998->rdev = kzalloc(size, GFP_KERNEL);
        if (!max8998->rdev) {
                kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
        }
 
        rdev = max8998->rdev;
+       max8998->dev = &pdev->dev;
        max8998->iodev = iodev;
+       max8998->num_regulators = pdata->num_regulators;
        platform_set_drvdata(pdev, max8998);
 
        for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
 
        return 0;
 err:
-       for (i = 0; i <= max8998->num_regulators; i++)
+       for (i = 0; i < max8998->num_regulators; i++)
                if (rdev[i])
                        regulator_unregister(rdev[i]);
 
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
        struct regulator_dev **rdev = max8998->rdev;
        int i;
 
-       for (i = 0; i <= max8998->num_regulators; i++)
+       for (i = 0; i < max8998->num_regulators; i++)
                if (rdev[i])
                        regulator_unregister(rdev[i]);
 
index c239f42aa4a3efa4ba47743f3d031c00f750a81a..020f5878d7fff19bb35f58b7cb2745d10beeb484 100644 (file)
@@ -626,12 +626,6 @@ fail:
        return error;
 }
 
-/**
- * tps6507x_remove - TPS6507x driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
 static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
index 8cff1413a147f1822336160602c57d253f3d1902..51237fbb1bbb7e15f952296a9ffcb9f668418fc9 100644 (file)
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
        mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
        val = (val & mask) >> ri->volt_shift;
 
-       if (val > ri->desc.n_voltages)
+       if (val >= ri->desc.n_voltages)
                BUG();
 
        return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
        if (ret)
                return ret;
 
-       return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit);
+       return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
 }
 
 static int tps6586x_regulator_enable(struct regulator_dev *rdev)
index e686cdb61b97cd8a54ab7fbecb1312a41e333595..9edf8f692341d89ed3645459da80be221c94eeec 100644 (file)
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
 
        case REGULATOR_MODE_IDLE:
                ret = wm831x_set_bits(wm831x, ctrl_reg,
-                                     WM831X_LDO1_LP_MODE,
-                                     WM831X_LDO1_LP_MODE);
+                                     WM831X_LDO1_LP_MODE, 0);
                if (ret < 0)
                        return ret;
 
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
                                      WM831X_LDO1_ON_MODE);
                if (ret < 0)
                        return ret;
+               break;
 
        case REGULATOR_MODE_STANDBY:
                ret = wm831x_set_bits(wm831x, ctrl_reg,
-                                     WM831X_LDO1_LP_MODE, 0);
+                                     WM831X_LDO1_LP_MODE,
+                                     WM831X_LDO1_LP_MODE);
                if (ret < 0)
                        return ret;
 
index 0e6ed7db93643436eadaeee5fbc48b7fd8ed599f..fe4b8a8a9dfd43a88ba9df10a9496a1b732d4328 100644 (file)
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
                        mode = REGULATOR_MODE_NORMAL;
        } else if (!active && !sleep)
                mode = REGULATOR_MODE_IDLE;
-       else if (!sleep)
+       else if (sleep)
                mode = REGULATOR_MODE_STANDBY;
 
        return mode;
index d26780ea254b5d9e9e296309dc23f2640418a7f5..261a07e0fb24c0dd7cd1d05d3d60c35d184b1e38 100644 (file)
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
                err = PTR_ERR(rtc);
                return err;
        }
+       platform_set_drvdata(pdev, rtc);
 
        return 0;
 }
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
 
        rtc_device_unregister(rtc);
+       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens on every RTC 1 Hz tick.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 9daed8db83d3e5400559ac3c51c86d0e6b45f00d..9de8516e3531e70bad818747f41de4b8052486bd 100644 (file)
@@ -268,7 +268,6 @@ out_irq:
                free_irq(client->irq, client);
 
 out_free:
-       i2c_set_clientdata(client, NULL);
        kfree(ds3232);
        return ret;
 }
@@ -287,7 +286,6 @@ static int __devexit ds3232_remove(struct i2c_client *client)
        }
 
        rtc_device_unregister(ds3232->rtc);
-       i2c_set_clientdata(client, NULL);
        kfree(ds3232);
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index a0d3ec89d412ac57d683fa5a446720a375dfab22..f57a87f4ae96abb367a2e08d353378b31f2a19fa 100644 (file)
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        s3c_rtc_setaie(alrm->enabled);
 
-       if (alrm->enabled)
-               enable_irq_wake(s3c_rtc_alarmno);
-       else
-               disable_irq_wake(s3c_rtc_alarmno);
-
        return 0;
 }
 
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
        }
        s3c_rtc_enable(pdev, 0);
+
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
                tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
                writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 #else
index b7de02525ec901ebbee390b2798043eb1938f4bd..85cf607fc78f62d243ae41b20f2df018710be2e0 100644 (file)
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
        if (!blkdat->request_queue)
                return -ENOMEM;
 
-       elevator_exit(blkdat->request_queue->elevator);
-       rc = elevator_init(blkdat->request_queue, "noop");
+       rc = elevator_change(blkdat->request_queue, "noop");
        if (rc)
                goto cleanup_queue;
 
index 6edf20b62de5bae28214275931f0db9fd1fcd1f2..2c7d2d9be4d0cd3099f63d2b2690242cc12eed50 100644 (file)
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                                dev_fsm, dev_fsm_len, GFP_KERNEL);
        if (priv->fsm == NULL) {
                CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
-               kfree(dev);
+               free_netdev(dev);
                return NULL;
        }
        fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                grp = ctcmpc_init_mpc_group(priv);
                if (grp == NULL) {
                        MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
-                       kfree(dev);
+                       free_netdev(dev);
                        return NULL;
                }
                tasklet_init(&grp->mpc_tasklet2,
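Editor's note: both ctcm error paths above switch from kfree() to free_netdev(), the required counterpart of alloc_netdev(). The net_device pointer is not the start of the underlying allocation (the allocator aligns it within a larger block), so kfree() on it corrupts the heap; free_netdev() knows the real base and also handles the deferred-free rules for registered devices. A hedged kernel-style sketch, setup callback and failure check hypothetical:

        /* sketch: pair alloc_netdev() with free_netdev(), never kfree() */
        struct net_device *dev = alloc_netdev(sizeof(struct ctcm_priv),
                                              "ctcm%d", ctcm_dev_setup);
        if (!dev)
                return NULL;
        if (init_failed(dev)) {
                free_netdev(dev);       /* not kfree(dev) */
                return NULL;
        }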
index 7d4d2275573c138d9e1c5223e41928defd4649aa..7f11f3e48e120ec82f1d7e26a0f1055fca771f97 100644 (file)
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
                           enum iscsi_host_param param, char *buf)
 {
        struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
-       int len = 0;
-       int status;
+       int status = 0;
 
        SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
        switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        default:
                return iscsi_host_get_param(shost, param, buf);
        }
-       return len;
+       return status;
 }
 
 int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
index 26350e470bccf71ea47b8dd99f387a31aff21670..877324fc594c28b606db82a12ab9b3559615ecbd 100644 (file)
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        memset(req, 0, sizeof(*req));
        wrb->tag0 |= tag;
 
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
                           sizeof(*req));
index cd05e049d5f6ad7164c6485ee336db00933c63b0..d0c82340f0e25198d0c56101c76d0d1a00b17254 100644 (file)
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
 {
        struct scsi_sense_hdr sshdr;
 
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
        scsi_show_sense_hdr(&sshdr);
        scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
 }
 EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
 
 void scsi_print_result(struct scsi_cmnd *cmd)
 {
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_result(cmd->result);
 }
 EXPORT_SYMBOL(scsi_print_result);
index 4f5551b5fe53d29a1a0473d3d5d799dabb46e0d5..c5d0606ad0974edab8c0326f4fadff905413c171 100644 (file)
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
index fda4de3440c4640f6754a1a66d143aeb5da8936c..e88bbdde49c5066b4bfb9fa2d8b5822115777cd8 100644 (file)
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+       WARN_ON(bio->bi_rw & REQ_WRITE);
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
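Editor's note: REQ_WRITE is a flag bit, not bit 0, so `1 == (bio->bi_rw & REQ_WRITE)` could never be true and the old WARN_ON was dead code; testing the masked value for truth is the fix. Standalone illustration, bit position hypothetical:

    #include <stdio.h>

    #define REQ_WRITE (1u << 3)     /* some flag bit, not bit 0 */

    int main(void)
    {
            unsigned long rw = REQ_WRITE;

            /* the masked value is 8 here, never 1 */
            printf("1 == (rw & REQ_WRITE): %d\n", 1 == (rw & REQ_WRITE));
            printf("!!(rw & REQ_WRITE):    %d\n", !!(rw & REQ_WRITE));
            return 0;
    }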
index 420238cc794eb7dd69dbe132bdf6be29752ed644..114bc5a81171993ac91c27ba600cee72adb9aed9 100644 (file)
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
        qla24xx_disable_vp(vha);
 
+       vha->flags.delete_progress = 1;
+
        fc_remove_host(vha->host);
 
        scsi_remove_host(vha->host);
 
-       qla2x00_free_fcports(vha);
+       if (vha->timer_active) {
+               qla2x00_vp_stop_timer(vha);
+               DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+               " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+       }
 
        qla24xx_deallocate_vp_id(vha);
 
+       /* No pending activity should remain on the vha at this point */
+       DEBUG(msleep(random32()%10));  /* Just to see if something falls on
+                                       * the net we have placed below */
+
+       BUG_ON(atomic_read(&vha->vref_count));
+
+       qla2x00_free_fcports(vha);
+
        mutex_lock(&ha->vport_lock);
        ha->cur_vport_count--;
        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
 
-       if (vha->timer_active) {
-               qla2x00_vp_stop_timer(vha);
-               DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
-                   "has stopped\n",
-                   vha->host_no, vha->vp_idx, vha));
-        }
-
        if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
                if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
index 6cfc28a25eb3c41cca4f24b91ba575fd8d12c48d..b74e6b5743dc2931cfefa730beb351f1fa60ea57 100644 (file)
@@ -29,8 +29,6 @@
 /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
 /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
 
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
 /*
 * Macros use for debugging the driver.
 */
index 3a432ea0c7a3548844dd4013fd06798396135a70..d2a4e1530708add3659525948aec20ce13629f91 100644 (file)
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
 #define MBX_UPDATE_FLASH_ACTIVE        3
 
        struct mutex vport_lock;        /* Virtual port synchronization */
+       spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
                uint32_t        management_server_logged_in :1;
                uint32_t        process_response_queue  :1;
                uint32_t        difdix_supported:1;
+               uint32_t        delete_progress:1;
        } flags;
 
        atomic_t        loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
+
+       atomic_t        vref_count;
 } scsi_qla_host_t;
 
 /*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
         atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
+       atomic_inc(&__vha->vref_count);                      \
+       mb();                                                \
+       if (__vha->flags.delete_progress) {                  \
+               atomic_dec(&__vha->vref_count);              \
+               __bail = 1;                                  \
+       } else {                                             \
+               __bail = 0;                                  \
+       }                                                    \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {                   \
+       atomic_dec(&__vha->vref_count);                      \
+} while (0)
+
+
 #define qla_printk(level, ha, format, arg...) \
        dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
 
index d863ed2619b56853f7d29250dfd17b876fb0ee5c..9c383baebe279d0c27bb3027dc1bd337ab52603e 100644 (file)
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
 {
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
+       struct scsi_qla_host *vha = sp->fcport->vha;
 
        del_timer_sync(&iocb->timer);
        kfree(iocb);
        kfree(ctx);
        mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+       QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
 inline srb_t *
 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
     unsigned long tmo)
 {
-       srb_t *sp;
+       srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct srb_ctx *ctx;
        struct srb_iocb *iocb;
+       uint8_t bail;
+
+       QLA_VHA_MARK_BUSY(vha, bail);
+       if (bail)
+               return NULL;
 
        sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
        if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
        iocb->timer.function = qla2x00_ctx_sp_timeout;
        add_timer(&iocb->timer);
 done:
+       if (!sp)
+               QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
 }
 
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                qla2x00_init_response_q_entries(rsp);
        }
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Clear RSCN queue. */
        list_for_each_entry(vp, &ha->vp_list, list) {
                vp->rscn_in_ptr = 0;
                vp->rscn_out_ptr = 0;
        }
+
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        ha->isp_ops->config_rings(vha);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                /* Bypass virtual ports of the same host. */
                found = 0;
                if (ha->num_vhosts) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
                        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                                if (new_fcport->d_id.b24 == vp->d_id.b24) {
                                        found = 1;
                                        break;
                                }
                        }
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        if (found)
                                continue;
                }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
        struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        rval = QLA_SUCCESS;
 
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                /* Check for loop ID being already in use. */
                found = 0;
                fcport = NULL;
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
                list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        list_for_each_entry(fcport, &vp->vp_fcports, list) {
                                if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                        if (found)
                                break;
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
 
                /* If not in use then it is free to use. */
                if (!found) {
@@ -3791,14 +3814,27 @@ void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
        fc_port_t *fcport;
-       struct scsi_qla_host *tvp, *vha;
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha = base_vha->hw;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Go with deferred removal of rport references. */
-       list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
-               list_for_each_entry(fcport, &vha->vp_fcports, list)
+       list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+               atomic_inc(&vha->vref_count);
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (fcport && fcport->drport &&
-                           atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+                           atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_rport_del(fcport);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                       }
+               }
+               atomic_dec(&vha->vref_count);
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        vha->flags.online = 0;
        ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
-               list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        qla2x00_mark_all_devices_lost(vp, 0);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        uint8_t        status = 0;
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
        struct req_que *req = ha->req_q_map[0];
+       unsigned long flags;
 
        if (vha->flags.online) {
                qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                                "qla2x00_abort_isp(%ld): succeeded.\n",
                                vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        status = qla2x00_init_rings(vha);
        if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                        "qla82xx_restart_isp(%ld): succeeded.\n",
                        vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla82xx_restart_isp: **** FAILED ****\n");
index 6982ba70e12af12235d8d6502c78f81c06e732aa..28f65be19dad9878f19981a18ebc60ef13bed356 100644 (file)
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                cp->result = DID_ERROR << 16;
                                break;
                        }
-               } else if (!lscsi_status) {
+               } else {
                        DEBUG2(qla_printk(KERN_INFO, ha,
                            "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
                            "of 0x%x bytes).\n", vha->host_no, cp->device->id,
                            cp->device->lun, resid, scsi_bufflen(cp)));
 
-                       cp->result = DID_ERROR << 16;
-                       break;
+                       cp->result = DID_ERROR << 16 | lscsi_status;
+                       goto check_scsi_status;
                }
 
                cp->result = DID_OK << 16 | lscsi_status;
                logit = 0;
 
+check_scsi_status:
                /*
                 * Check to see if SCSI Status is non zero. If so report SCSI
                 * Status.
index 6009b0c69488144bf21715a2a8b03192fbac56ef..a595ec8264f8d7f73823a11d0616591a7957bd63 100644 (file)
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       scsi_qla_host_t *tvp;
+       unsigned long   flags;
 
        if (rptid_entry->entry_status != 0)
                return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        return;
                }
 
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list)
                        if (vp_idx == vp->vp_idx)
                                break;
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                if (!vp)
                        return;
 
index 987c5b0ca78ea22d67d074ee7d170ef1f14b52a4..2b69392a71a1fac76b50ab389a0c3e1054dce59f 100644 (file)
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 {
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
        /* Find an empty slot and assign an vp_id */
        mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        mutex_unlock(&ha->vport_lock);
        return vp_id;
 }
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 {
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
 
        mutex_lock(&ha->vport_lock);
+       /*
+        * Wait for all pending activities to finish before removing vport from
+        * the list.
+        * Lock needs to be held for safe removal from the list (it
+        * ensures no active vp_list traversal while the vport is removed
+        * from the list)
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       while (atomic_read(&vha->vref_count)) {
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+               msleep(500);
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+       }
+       list_del(&vha->list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);
-       list_del(&vha->list);
+
        mutex_unlock(&ha->vport_lock);
 }
 
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 {
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
-               if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+               if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
 }
 
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 static void
 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 {
+       /*
+        * !!! NOTE !!!
+        * If this function is called from contexts other than vp create,
+        * disable or delete, make sure it is synchronized with the
+        * delete thread.
+        */
        fc_port_t *fcport;
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
                    "loop_id=0x%04x :%x\n",
                    vha->host_no, fcport->loop_id, fcport->vp_idx));
 
-               atomic_set(&fcport->state, FCS_DEVICE_DEAD);
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                atomic_set(&fcport->state, FCS_UNCONFIGURED);
        }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-       scsi_qla_host_t *vha, *tvha;
+       scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
+       unsigned long flags;
 
-       list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
+                       atomic_inc(&vha->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vha->vref_count);
                }
                i++;
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        if (vha->vp_idx)
                return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        if (!(ha->current_topology & ISP_CFG_F))
                return;
 
-       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-               if (vp->vp_idx)
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               if (vp->vp_idx) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        ret = qla2x00_do_dpc_vp(vp);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
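
The vport hunks above replace lock-free list_for_each_entry_safe() walks with a spinlock-plus-refcount discipline: hold vport_slock only while on the list, pin the current node with vref_count before dropping the lock to do blocking work, and make teardown wait for the count to drain before list_del(). A minimal sketch of that discipline, with all demo_* names illustrative rather than from the driver (needs <linux/list.h>, <linux/spinlock.h>, <linux/atomic.h>, <linux/delay.h>):

struct demo_node {
	struct list_head list;
	atomic_t ref;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* Walk the list; pin each node before dropping the lock to sleep. */
static void demo_for_each(void (*fn)(struct demo_node *))
{
	struct demo_node *n;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry(n, &demo_list, list) {
		atomic_inc(&n->ref);	/* keeps n (and n->list.next) valid */
		spin_unlock_irqrestore(&demo_lock, flags);

		fn(n);			/* may sleep */

		spin_lock_irqsave(&demo_lock, flags);
		atomic_dec(&n->ref);
	}
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Teardown waits for in-flight users to drain before unlinking. */
static void demo_remove(struct demo_node *n)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	while (atomic_read(&n->ref)) {
		spin_unlock_irqrestore(&demo_lock, flags);
		msleep(500);
		spin_lock_irqsave(&demo_lock, flags);
	}
	list_del(&n->list);
	spin_unlock_irqrestore(&demo_lock, flags);
}

This is why qla24xx_deallocate_vp_id() can poll vref_count under vport_slock: any traverser holding a reference has already left the lock, and reacquires it only to drop the count.
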
index 915b77a6e19390026134ff697cceaa89e9dd8c99..0a71cc71eab23922e9c6aee451962f60a1e3cfb3 100644 (file)
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
 sufficient_dsds:
                req_cnt = 1;
 
+               if (req->cnt < (req_cnt + 2)) {
+                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+                               &reg->req_q_out[0]);
+                       if (req->ring_index < cnt)
+                               req->cnt = cnt - req->ring_index;
+                       else
+                               req->cnt = req->length -
+                                       (req->ring_index - cnt);
+               }
+
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+
                ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!sp->ctx) {
                        DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        }
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Due to fw hung, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Due to fw hung, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                }
-       }
+       } else
+               vha->seconds_since_last_heartbeat = 0;
        vha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
 
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                "%s(): Adapter reset needed!\n", __func__);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Need reset, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Need reset, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                } else {
                        qla82xx_check_fw_alive(vha);
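
The first qla82xx hunk recomputes request-ring space from the hardware consumer index before giving up. The wrap arithmetic, pulled out as a stand-alone helper for illustration:

/* Free slots in a circular ring: 'out' is the consumer index read from
 * hardware, 'in' the producer index, 'length' the ring size in slots. */
static u16 ring_free_slots(u16 out, u16 in, u16 length)
{
	if (in < out)
		return out - in;		/* producer trails consumer */
	return length - (in - out);		/* producer has wrapped */
}

qla82xx_start_scsi() only proceeds when at least req_cnt + 2 slots remain after the re-read, keeping headroom between the ring-full and ring-empty states.
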
index 8c80b49ac1c44d875f7c033fbb1c8962723307cb..1e4bff695254b4fb6a4adbc1008ba4a999352088 100644 (file)
@@ -2341,16 +2341,28 @@ probe_out:
 static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
-       scsi_qla_host_t *base_vha, *vha, *temp;
+       scsi_qla_host_t *base_vha, *vha;
        struct qla_hw_data  *ha;
+       unsigned long flags;
 
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
 
-       list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
-               if (vha && vha->fc_vport)
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vha, &ha->vp_list, list) {
+               atomic_inc(&vha->vref_count);
+
+               if (vha && vha->fc_vport) {
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        fc_vport_terminate(vha->fc_vport);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+               }
+
+               atomic_dec(&vha->vref_count);
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        set_bit(UNLOADING, &base_vha->dpc_flags);
 
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 {
        struct qla_work_evt *e;
+       uint8_t bail;
+
+       QLA_VHA_MARK_BUSY(vha, bail);
+       if (bail)
+               return NULL;
 
        e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
-       if (!e)
+       if (!e) {
+               QLA_VHA_MARK_NOT_BUSY(vha);
                return NULL;
+       }
 
        INIT_LIST_HEAD(&e->list);
        e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                }
                if (e->flags & QLA_EVT_FLAG_FREE)
                        kfree(e);
+
+               /* For each completed work item, decrement the vha ref count */
+               QLA_VHA_MARK_NOT_BUSY(vha);
        }
 }
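
The work-queue hunks tie event allocation to the same vref_count lifetime rule: every queued event must pin the vha until it is processed. Assuming QLA_VHA_MARK_BUSY()/QLA_VHA_MARK_NOT_BUSY() wrap the refcount with an is-deleting check (the macro bodies are not shown in this diff), the invariant looks like:

/* One reference per queued event, dropped exactly once. */
QLA_VHA_MARK_BUSY(vha, bail);		/* ref++, or bail if deleting */
if (bail)
	return NULL;			/* vport teardown in progress */

e = kzalloc(sizeof(*e), GFP_ATOMIC);
if (!e) {
	QLA_VHA_MARK_NOT_BUSY(vha);	/* ref--: nothing was queued */
	return NULL;
}
/* ... queue e; qla2x00_do_work() does MARK_NOT_BUSY per event ... */
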
 
index e75ccb91317dcd27b030efb04f59a3f5ea4fcf16..8edbccb3232d3b68cb16d0054a444e657a452975 100644 (file)
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.03-k0"
+#define QLA2XXX_VERSION      "8.03.04-k0"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
-#define QLA_DRIVER_PATCH_VER   3
+#define QLA_DRIVER_PATCH_VER   4
 #define QLA_DRIVER_BETA_VER    0
index ad0ed212db4ad094441f7a5656639e989c980738..348fba0a8976467fa9724411699b918a9cd8b0ea 100644 (file)
@@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 
        /* If the user actually wanted this page, we can skip the rest */
        if (page == 0)
-               return -EINVAL;
+               return 0;
 
        for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
                if (buf[i + 4] == page)
                        goto found;
 
-       if (i < buf[3] && i > buf_len)
+       if (i < buf[3] && i >= buf_len - 4)
                /* ran off the end of the buffer, give us benefit of doubt */
                goto found;
        /* The device claims it doesn't support the requested page */
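
Two fixes land in scsi_get_vpd_page(): asking for page 0 (the supported-pages list itself) now succeeds instead of returning -EINVAL, and the truncated-buffer test is finally reachable (the old "i > buf_len" could never hold, since the loop bound keeps i below buf_len - 4). A stand-alone rendering of the corrected scan, illustrative rather than the driver function:

/* buf holds a VPD page-0 response: 4-byte header, then buf[3] entries. */
static bool vpd_page_listed(const unsigned char *buf, int buf_len, u8 page)
{
	int i, entries = min((int)buf[3], buf_len - 4);

	for (i = 0; i < entries; i++)
		if (buf[i + 4] == page)
			return true;
	/* More entries claimed than fit in the buffer: benefit of doubt. */
	return buf[3] > entries;
}
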
index 9ade720422c685f01ab8f7fe3b625bfc054390d7..ee02d3838a0a41e09fbfff0096483ad3a24eb968 100644 (file)
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 err_exit:
        scsi_release_buffers(cmd);
-       scsi_put_command(cmd);
        cmd->request->special = NULL;
+       scsi_put_command(cmd);
        return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
index 2714becc2eaf72fc4cb4452232b3fc67ab320586..ffa0689ee84050c13b8d347b9429e5bf64a88ee6 100644 (file)
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
 
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
 
-       if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
+       if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
                if (scsi_block_when_processing_errors(sdev))
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
@@ -2625,15 +2625,15 @@ module_exit(exit_sd);
 static void sd_print_sense_hdr(struct scsi_disk *sdkp,
                               struct scsi_sense_hdr *sshdr)
 {
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_sense_hdr(sshdr);
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
 }
 
 static void sd_print_result(struct scsi_disk *sdkp, int result)
 {
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_result(result);
 }
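
In sd_release(), atomic_dec_return() yields the post-decrement value, so the old test fired for every closer except the last one; comparing against 0 restricts the medium-removal unlock to the final release. (The adjacent sd_printk changes just avoid empty format strings.) In miniature:

/* atomic_dec_return() reports the value *after* the decrement: */
atomic_t openers = ATOMIC_INIT(2);

atomic_dec_return(&openers);	/* -> 1: another opener remains    */
atomic_dec_return(&openers);	/* -> 0: last close, allow removal */
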
 
index a7bc8b7b09ac3f26f7a17f0a992d9806b1e4ce40..2c3e89ddf069fb66fad54c46912216c97daa1ff8 100644 (file)
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
 
 static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
 {
-       if (label)
-               sym_print_addr(cp->cmd, "%s: ", label);
-       else
-               sym_print_addr(cp->cmd, "");
+       sym_print_addr(cp->cmd, "%s: ", label);
 
        spi_print_msg(msg);
        printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
                        switch (np->msgin [2]) {
                        case M_X_MODIFY_DP:
                                if (DEBUG_FLAGS & DEBUG_POINTER)
-                                       sym_print_msg(cp, NULL, np->msgin);
+                                       sym_print_msg(cp, "extended msg ",
+                                                     np->msgin);
                                tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 
                                      (np->msgin[5]<<8)  + (np->msgin[6]);
                                sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
                 */
                case M_IGN_RESIDUE:
                        if (DEBUG_FLAGS & DEBUG_POINTER)
-                               sym_print_msg(cp, NULL, np->msgin);
+                               sym_print_msg(cp, "1 or 2 byte ", np->msgin);
                        if (cp->host_flags & HF_SENSE)
                                OUTL_DSP(np, SCRIPTA_BA(np, clrack));
                        else
index 50441ffe8e3856592dbfdae368e9891b64078d50..2904aa044126dbc49729caacbcb3d236da900bc6 100644 (file)
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
        spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
 {
-       int line = port->line;
-
-       if (line >= port->state->port.tty->driver->num)
-               return;
-
-       if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+       if (new == N_PPS) {
                port->flags |= UPF_HARDPPS_CD;
                pl010_enable_ms(port);
        } else
index 93de907b12088a54ab5809cf79c46ba5ec2757f2..800c54602339780cc79d615e16fb89f49bf3d5f5 100644 (file)
@@ -2044,6 +2044,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
                if (!port) {
                        printk(KERN_WARNING
                               "IOC3 serial memory not available for port\n");
+                       ret = -ENOMEM;
                        goto out4;
                }
                spin_lock_init(&port->ip_lock);
index bc9af503907f4b24ac0845068701b2de1474a105..5dff45c76d32c5f026dc1ae408166a4f5e9ac6d2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
+#include <linux/slab.h>
 #include <linux/serial_reg.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void)
        }
 
        phsu = hsu;
-
        hsu_debugfs_init(hsu);
        return;
 
@@ -1435,18 +1435,20 @@ err_free_region:
 
 static void serial_hsu_remove(struct pci_dev *pdev)
 {
-       struct hsu_port *hsu;
-       int i;
+       void *priv = pci_get_drvdata(pdev);
+       struct uart_hsu_port *up;
 
-       hsu = pci_get_drvdata(pdev);
-       if (!hsu)
+       if (!priv)
                return;
 
-       for (i = 0; i < 3; i++)
-               uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
+       /* For port 0/1/2, priv is the address of uart_hsu_port */
+       if (pdev->device != 0x081E) {
+               up = priv;
+               uart_remove_one_port(&serial_hsu_reg, &up->port);
+       }
 
        pci_set_drvdata(pdev, NULL);
-       free_irq(hsu->irq, hsu);
+       free_irq(pdev->irq, priv);
        pci_disable_device(pdev);
 }
 
index 8dedb266f143f1cf801d84f34892a999b2dc9934..c4399e23565aca0c986fdecc56fc424dee2c1ee3 100644 (file)
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
        psc_fifoc = of_iomap(np, 0);
        if (!psc_fifoc) {
                pr_err("%s: Can't map FIFOC\n", __func__);
+               of_node_put(np);
                return -ENODEV;
        }
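
The mpc512x fix closes a device-node refcount leak on the error path: the node looked up earlier in the function must be released on every exit, including a failed of_iomap(). General shape (the lookup call and compatible string here are assumptions for illustration):

struct device_node *np;

np = of_find_compatible_node(NULL, NULL, "fsl,mpc512x-psc-fifo");
if (!np)
	return -ENODEV;

psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
	of_node_put(np);	/* drop the reference on this path too */
	return -ENODEV;
}
/* ... use the mapping ... */
of_node_put(np);		/* and on the success path */
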
 
index f6ad1ecbff79ec9688fbc024a2036f351efff26e..51c15f58e01ef84a8ce43ba8b62472f0e4d7bec0 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
index 141c69554bd481d27863b852bbd79b364417643e..7d475b2a79e8bb7fee0cd8f1f048fc61d1b7db3c 100644 (file)
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
        info->p_dev = link;
        link->priv = info;
 
-       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-       link->resource[0]->end = 8;
        link->conf.Attributes = CONF_ENABLE_IRQ;
        if (do_sound) {
                link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
 
 /*====================================================================*/
 
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+       unsigned int port = 0;
+       struct serial_info *info = p_dev->priv;
+
+       if ((p_dev->resource[1]->end != 0) &&
+               (resource_size(p_dev->resource[1]) == 8)) {
+               port = p_dev->resource[1]->start;
+               info->slave = 1;
+       } else if ((info->manfid == MANFID_OSITECH) &&
+               (resource_size(p_dev->resource[0]) == 0x40)) {
+               port = p_dev->resource[0]->start + 0x28;
+               info->slave = 1;
+       }
+       if (info->slave)
+               return setup_serial(p_dev, info, port, p_dev->irq);
+
+       dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+       return -ENODEV;
+}
+
 static int simple_config_check(struct pcmcia_device *p_dev,
                               cistpl_cftable_entry_t *cf,
                               cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
        struct serial_info *info = link->priv;
        int i = -ENODEV, try;
 
-       /* If the card is already configured, look up the port and irq */
-       if (link->function_config) {
-               unsigned int port = 0;
-               if ((link->resource[1]->end != 0) &&
-                       (resource_size(link->resource[1]) == 8)) {
-                       port = link->resource[1]->end;
-                       info->slave = 1;
-               } else if ((info->manfid == MANFID_OSITECH) &&
-                       (resource_size(link->resource[0]) == 0x40)) {
-                       port = link->resource[0]->start + 0x28;
-                       info->slave = 1;
-               }
-               if (info->slave) {
-                       return setup_serial(link, info, port,
-                                           link->irq);
-               }
-       }
+       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+       link->resource[0]->end = 8;
 
        /* First pass: look for a config entry that looks normal.
         * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
        if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
                goto found_port;
 
-       printk(KERN_NOTICE
-              "serial_cs: no usable port range found, giving up\n");
+       dev_warn(&link->dev, "no usable port range found, giving up\n");
        return -1;
 
 found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
        int i, base2 = 0;
 
        /* First, look for a generic full-sized window */
+       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
        link->resource[0]->end = info->multi * 8;
        if (pcmcia_loop_config(link, multi_config_check, &base2)) {
                /* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
                info->multi = 2;
                if (pcmcia_loop_config(link, multi_config_check_notpicky,
                                       &base2)) {
-                       printk(KERN_NOTICE "serial_cs: no usable port range"
+                       dev_warn(&link->dev, "no usable port range "
                               "found, giving up\n");
                        return -ENODEV;
                }
        }
 
        if (!link->irq)
-               dev_warn(&link->dev,
-                       "serial_cs: no usable IRQ found, continuing...\n");
+               dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
 
        /*
         * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
           multifunction cards that ask for appropriate IO port ranges */
        if ((info->multi == 0) &&
            (link->has_func_id) &&
+           (link->socket->pcmcia_pfc == 0) &&
            ((link->func_id == CISTPL_FUNCID_MULTI) ||
             (link->func_id == CISTPL_FUNCID_SERIAL)))
                pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
        if (info->quirk && info->quirk->multi != -1)
                info->multi = info->quirk->multi;
 
-       if (info->multi > 1)
+       dev_info(&link->dev,
+               "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+               link->manf_id, link->card_id,
+               link->socket->pcmcia_pfc, info->multi, info->quirk);
+       if (link->socket->pcmcia_pfc)
+               i = pfc_config(link);
+       else if (info->multi > 1)
                i = multi_config(link);
        else
                i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
        return 0;
 
 failed:
-       dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+       dev_warn(&link->dev, "failed to initialize\n");
        serial_remove(link);
        return -ENODEV;
 }
index acd35d1ebd12621e35a05324daa450992b081f17..4c37c4e28647e68f62e1eaadcc92ac887857bf44 100644 (file)
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
        msg->state = NULL;
        if (msg->complete)
                msg->complete(msg->context);
-       /* This message is completed, so let's turn off the clock! */
+       /* This message is completed, so let's turn off the clocks! */
        clk_disable(pl022->clk);
+       amba_pclk_disable(pl022->adev);
 }
 
 /**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
        /* Setup the SPI using the per chip configuration */
        pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
        /*
-        * We enable the clock here, then the clock will be disabled when
+        * We enable the clocks here, then the clocks will be disabled when
         * giveback() is called in each method (poll/interrupt/DMA)
         */
+       amba_pclk_enable(pl022->adev);
        clk_enable(pl022->clk);
        restore_state(pl022);
        flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        /* Disable SSP */
-       clk_enable(pl022->clk);
        writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
               SSP_CR1(pl022->virtbase));
        load_ssp_default_config(pl022);
-       clk_disable(pl022->clk);
 
        status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
                             pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
                goto err_spi_register;
        }
        dev_dbg(dev, "probe succeded\n");
+       /* Disable the silicon block pclk and clock it when needed */
+       amba_pclk_disable(adev);
        return 0;
 
  err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
                return status;
        }
 
-       clk_enable(pl022->clk);
+       amba_pclk_enable(adev);
        load_ssp_default_config(pl022);
-       clk_disable(pl022->clk);
+       amba_pclk_disable(adev);
        dev_dbg(&adev->dev, "suspended\n");
        return 0;
 }
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
        return amba_driver_register(&pl022_driver);
 }
 
-module_init(pl022_init);
+subsys_initcall(pl022_init);
 
 static void __exit pl022_exit(void)
 {
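
The pl022 hunks pair the bus (APB) clock with the functional SSP clock: both come up when a message is pumped, both go down in giveback(), and probe leaves the block unclocked once initialization is done. The intended symmetry, sketched (amba_pclk_enable()/amba_pclk_disable() manage the bus clock through the amba device):

static void demo_pump(struct pl022 *pl022)
{
	amba_pclk_enable(pl022->adev);	/* bus clock: register access */
	clk_enable(pl022->clk);		/* functional clock: the SSP  */
	/* ... program and start the transfer ... */
}

static void demo_giveback(struct pl022 *pl022)
{
	/* reverse order on the way out */
	clk_disable(pl022->clk);
	amba_pclk_disable(pl022->adev);
}

Moving pl022_init() to subsys_initcall() makes the SPI master available before ordinary module_init() drivers that may probe devices sitting on the bus.
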
index d256cb00604c55db5cc2ce25233b8d8e189c69be..56247853c298aac0de06eae049429ed5581727c4 100644 (file)
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
        wait_till_not_busy(dws);
 }
 
-static void null_cs_control(u32 command)
-{
-}
-
 static int null_writer(struct dw_spi *dws)
 {
        u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
                                        struct spi_transfer,
                                        transfer_list);
 
-       if (!last_transfer->cs_change)
+       if (!last_transfer->cs_change && dws->cs_control)
                dws->cs_control(MRST_SPI_DEASSERT);
 
        msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 {
        struct dw_spi *dws = dev_id;
+       u16 irq_status, irq_mask = 0x3f;
+
+       irq_status = dw_readw(dws, isr) & irq_mask;
+       if (!irq_status)
+               return IRQ_NONE;
 
        if (!dws->cur_msg) {
                spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
         */
        if (dws->cs_control) {
                if (dws->rx && dws->tx)
-                       chip->tmode = 0x00;
+                       chip->tmode = SPI_TMOD_TR;
                else if (dws->rx)
-                       chip->tmode = 0x02;
+                       chip->tmode = SPI_TMOD_RO;
                else
-                       chip->tmode = 0x01;
+                       chip->tmode = SPI_TMOD_TO;
 
-               cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+               cr0 &= ~SPI_TMOD_MASK;
                cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
        }
 
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
-
-               chip->cs_control = null_cs_control;
-               chip->enable_dma = 0;
        }
 
        /*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
 
-       ret = request_irq(dws->irq, dw_spi_irq, 0,
+       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
                        "dw_spi", dws);
        if (ret < 0) {
                dev_err(&master->dev, "can not get IRQ\n");
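
Requesting the line with IRQF_SHARED obliges the handler to disown interrupts that are not its own; the new status check at the top of dw_spi_irq() does exactly that. The required shape of any shared-line ISR:

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;
	u16 irq_status = dw_readw(dws, isr) & 0x3f;	/* masked status */

	if (!irq_status)
		return IRQ_NONE;	/* not ours: let co-owners run */

	/* ... service the interrupt ... */
	return IRQ_HANDLED;
}
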
index a9e5c79ae52a04a43aebabeacfc2d3cc29e03e77..b5a78a1f4421a0c19aa8735bd49f076fd8f91b46 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);
 
+       /* Attempt an OF style match */
+       if (of_driver_match_device(dev, drv))
+               return 1;
+
        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);
 
@@ -554,11 +559,9 @@ done:
 EXPORT_SYMBOL_GPL(spi_register_master);
 
 
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
 {
-       /* note: before about 2.6.14-rc1 this would corrupt memory: */
-       if (dev != master_dev)
-               spi_unregister_device(to_spi_device(dev));
+       spi_unregister_device(to_spi_device(dev));
        return 0;
 }
 
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
 {
        int dummy;
 
-       dummy = device_for_each_child(master->dev.parent, &master->dev,
-                                       __unregister);
+       dummy = device_for_each_child(&master->dev, NULL, __unregister);
        device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
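
With of_driver_match_device() consulted first, spi_match_device() now tries three mechanisms in order. Condensed for illustration (the modalias comparison is the core's long-standing last resort):

static int demo_spi_match(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	if (of_driver_match_device(dev, drv))		/* 1: DT compatible */
		return 1;
	if (sdrv->id_table)				/* 2: id table      */
		return !!spi_match_id(sdrv->id_table, spi);
	return strcmp(spi->modalias, drv->name) == 0;	/* 3: name          */
}

The __unregister() change is the matching cleanup on the other side: child spi devices now hang off the master device itself, so the old skip-the-master test is unnecessary.
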
index e24a63498acb84f7e4f9f83710aca3fadefcc177..63e51b011d508c8922bb047cfb94572049323493 100644 (file)
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
        spi_gpio->bitbang.master = spi_master_get(master);
        spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
 
-       if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) {
+       if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
                spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
                spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
                spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
index d31b57f7baaf3c3bf970294afd13a1396cecf46c..1dd86b835cd86a800aa8db8257e4a37422a33b8a 100644 (file)
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 
        xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
 
-       out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+       if (mspi->rx_dma == mspi->dma_dummy_rx)
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+       else
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
        out_be16(&rx_bd->cbd_datlen, 0);
        out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
 
-       out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+       if (mspi->tx_dma == mspi->dma_dummy_tx)
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+       else
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
        out_be16(&tx_bd->cbd_datlen, xfer_len);
        out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
                                 BD_SC_LAST);
index 97365815a729a2d3c17d47d3815ffa38ff6c6526..c3038da2648aa4e621b0e6127b89c3017724cb55 100644 (file)
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
                val = readl(regs + S3C64XX_SPI_STATUS);
        } while (TX_FIFO_LVL(val, sci) && loops--);
 
+       if (loops == 0)
+               dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
        /* Flush RxFIFO*/
        loops = msecs_to_loops(1);
        do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
                        break;
        } while (loops--);
 
+       if (loops == 0)
+               dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
        val = readl(regs + S3C64XX_SPI_CH_CFG);
        val &= ~S3C64XX_SPI_CH_SW_RST;
        writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 
        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
-       ms += 5; /* some tolerance */
+       ms += 10; /* some tolerance */
 
        if (dma_mode) {
                val = msecs_to_jiffies(ms) + 10;
                val = wait_for_completion_timeout(&sdd->xfer_completion, val);
        } else {
+               u32 status;
                val = msecs_to_loops(ms);
                do {
-                       val = readl(regs + S3C64XX_SPI_STATUS);
-               } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+                       status = readl(regs + S3C64XX_SPI_STATUS);
+               } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
        }
 
        if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
        writel(val, regs + S3C64XX_SPI_CLK_CFG);
 }
 
-void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                               int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+                                int size, enum s3c2410_dma_buffresult res)
 {
        struct s3c64xx_spi_driver_data *sdd = buf_id;
        unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
        spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                               int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+                                int size, enum s3c2410_dma_buffresult res)
 {
        struct s3c64xx_spi_driver_data *sdd = buf_id;
        unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 
                if (xfer->tx_buf != NULL) {
-                       xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
-                                               xfer->len, DMA_TO_DEVICE);
+                       xfer->tx_dma = dma_map_single(dev,
+                                       (void *)xfer->tx_buf, xfer->len,
+                                       DMA_TO_DEVICE);
                        if (dma_mapping_error(dev, xfer->tx_dma)) {
                                dev_err(dev, "dma_map_single Tx failed\n");
                                xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       sci = pdev->dev.platform_data;
+       if (!sci->src_clk_name) {
+               dev_err(&pdev->dev,
+                       "Board init must call s3c64xx_spi_set_info()\n");
+               return -EINVAL;
+       }
+
        /* Check for availability of necessary resource */
 
        dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       sci = pdev->dev.platform_data;
-
        platform_set_drvdata(pdev, master);
 
        sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
 {
        return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
 }
-module_init(s3c64xx_spi_init);
+subsys_initcall(s3c64xx_spi_init);
 
 static void __exit s3c64xx_spi_exit(void)
 {
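
The most subtle s3c64xx fix is in the polled wait: the old loop used 'val' both as the remaining-iterations countdown and as the register readback, so every status read clobbered the countdown. The corrected form keeps the two separate:

u32 status;
unsigned long val = msecs_to_loops(ms);	/* countdown, not status */

do {
	status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sci) < xfer->len && --val);

if (!val)
	return -EIO;	/* countdown expired: transfer timed out */
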
index baa8b05b9e8d70a49ba2dcb63e5e8c3cea7ed341..6e973a79aa25b6fec509639687fc4e196a568397 100644 (file)
@@ -30,7 +30,6 @@
 #include "hash.h"
 
 #include <linux/if_arp.h>
-#include <linux/netfilter_bridge.h>
 
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 
@@ -431,11 +430,6 @@ out:
        return NOTIFY_DONE;
 }
 
-static int batman_skb_recv_finish(struct sk_buff *skb)
-{
-       return NF_ACCEPT;
-}
-
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
 int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        if (atomic_read(&module_state) != MODULE_ACTIVE)
                goto err_free;
 
-       /* if netfilter/ebtables wants to block incoming batman
-        * packets then give them a chance to do so here */
-       ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
-                     batman_skb_recv_finish);
-       if (ret != 1)
-               goto err_out;
-
        /* packet should hold at least type and version */
        if (unlikely(skb_headlen(skb) < 2))
                goto err_free;
index 055edee7b4e401be0f9142d222fab9a52631c150..da3c82e47bbd6365788b827f9a1ee2e4a48be226 100644 (file)
@@ -29,7 +29,6 @@
 #include "vis.h"
 #include "aggregation.h"
 
-#include <linux/netfilter_bridge.h>
 
 static void send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
 
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
-        * (which is > 0). This will not be treated as an error.
-        * Also, if netfilter/ebtables wants to block outgoing batman
-        * packets then giving them a chance to do so here */
+        * (which is > 0). This will not be treated as an error. */
 
-       return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-                      dev_queue_xmit);
+       return dev_queue_xmit(skb);
 send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
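
With the bridge-netfilter hooks gone, send_skb_packet() returns dev_queue_xmit() directly; the retained comment documents its convention, illustrated here:

/* dev_queue_xmit() always consumes the skb; <0 is a real error,
 * NET_XMIT_DROP (>0) is a policy drop that callers here tolerate. */
static int demo_send(struct sk_buff *skb)
{
	int ret = dev_queue_xmit(skb);

	if (ret < 0)
		pr_debug("hard xmit error: %d\n", ret);
	return ret;
}
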
index 9952579425b99a477c29e488243337e9372b2200..1b3060eb2921c9da66fb2f0844de4a2d1afa4ce0 100644 (file)
@@ -80,5 +80,4 @@ struct st_proto_s {
 extern long st_register(struct st_proto_s *);
 extern long st_unregister(enum proto_type);
 
-extern struct platform_device *st_get_plat_device(void);
 #endif /* ST_H */
index 063c9b1db1ab655504f5908e7c4598418dafd0fd..b85d8bfdf600ad22cd4d62636b521bfb3ac38fa5 100644 (file)
@@ -38,7 +38,6 @@
 #include "st_ll.h"
 #include "st.h"
 
-#define VERBOSE
 /* strings to be used for rfkill entries and by
  * ST Core to be used for sysfs debug entry
  */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
        long err = 0;
        unsigned long flags = 0;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        pr_info("%s(%d) ", __func__, new_proto->type);
        if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
            || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
 
        pr_debug("%s: %d ", __func__, type);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (type < ST_BT || type >= ST_MAX) {
                pr_err(" protocol %d not supported", type);
                return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
 #endif
        long len;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (unlikely(skb == NULL || st_gdata == NULL
                || st_gdata->tty == NULL)) {
                pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
        struct st_data_s *st_gdata;
        pr_info("%s ", __func__);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        st_gdata->tty = tty;
        tty->disc_data = st_gdata;
 
index e0c32d149f5f294878d22597bec9c1bfc05660a4..8601320a679ee8ce5dda5ea9d36f8c85798dd23e 100644 (file)
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
 void st_core_exit(struct st_data_s *);
 
 /* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
 
 #define GPS_STUB_TEST
 #ifdef GPS_STUB_TEST
index b4a6c7fdc4e6ca59283dd3e64736c0efdfd97407..9e99463f76e8a0d048f7ec8667a455125d88cb0b 100644 (file)
@@ -72,10 +72,25 @@ const unsigned char *protocol_names[] = {
        PROTO_ENTRY(ST_GPS, "GPS"),
 };
 
+#define MAX_ST_DEVICES 3       /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
 
 /**********************************************************************/
 /* internal functions */
 
+/**
+ * st_get_plat_device -
+ *     returns a reference to the platform device requested by id.
+ *     As of now only one such device exists (id=0). The id to request
+ *     is determined by the caller's context: (a) the protocol driver
+ *     that is registering, or (b) the tty device that is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+       return st_kim_devices[id];
+}
+
 /**
  * validate_firmware_response -
  *     function to return whether the firmware response was proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
        struct kim_data_s       *kim_gdata;
        pr_info(" %s ", __func__);
 
-       kim_pdev = st_get_plat_device();
+       kim_pdev = st_get_plat_device(0);
        kim_gdata = dev_get_drvdata(&kim_pdev->dev);
 
        if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
  *     This would enable multiple such platform devices to exist
  *     on a given platform
  */
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
 {
        struct platform_device  *pdev;
        struct kim_data_s       *kim_gdata;
        /* get kim_gdata reference from platform device */
-       pdev = st_get_plat_device();
+       pdev = st_get_plat_device(id);
        kim_gdata = dev_get_drvdata(&pdev->dev);
        *core_data = kim_gdata->core_data;
 }
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
        long *gpios = pdev->dev.platform_data;
        struct kim_data_s       *kim_gdata;
 
+       st_kim_devices[pdev->id] = pdev;
        kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
        if (!kim_gdata) {
                pr_err("no mem to allocate");
index c725356cc3466ec5963290415765aab5fb50188a..de7ebb99d8f6cc61ff600b2d9580627f1a12e78d 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_TM6000
        tristate "TV Master TM5600/6000/6010 driver"
-       depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL
+       depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL
        select VIDEO_TUNER
        select MEDIA_TUNER_XC2028
        select MEDIA_TUNER_XC5000
index 32f7a0af6938094380e9de73b933f652f0a00612..54f7667cc7062b640f2fb97717f7c7c26768f2cf 100644 (file)
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable");
        }
 
 struct tm6000_ir_poll_result {
-       u8 rc_data[4];
+       u16 rc_data;
 };
 
 struct tm6000_IR {
@@ -60,9 +60,9 @@ struct tm6000_IR {
        int                     polling;
        struct delayed_work     work;
        u8                      wait:1;
+       u8                      key:1;
        struct urb              *int_urb;
        u8                      *urb_data;
-       u8                      key:1;
 
        int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
 
@@ -122,13 +122,14 @@ static void tm6000_ir_urb_received(struct urb *urb)
 
        if (urb->status != 0)
                printk(KERN_INFO "not ready\n");
-       else if (urb->actual_length > 0)
+       else if (urb->actual_length > 0) {
                memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
 
-       dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
-       ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
+               dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
+                       ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
 
-       ir->key = 1;
+               ir->key = 1;
+       }
 
        rc = usb_submit_urb(urb, GFP_ATOMIC);
 }
@@ -140,30 +141,47 @@ static int default_polling_getkey(struct tm6000_IR *ir,
        int rc;
        u8 buf[2];
 
-       if (ir->wait && !&dev->int_in) {
-               poll_result->rc_data[0] = 0xff;
+       if (ir->wait && !&dev->int_in)
                return 0;
-       }
 
        if (&dev->int_in) {
-               poll_result->rc_data[0] = ir->urb_data[0];
-               poll_result->rc_data[1] = ir->urb_data[1];
+               if (ir->ir.ir_type == IR_TYPE_RC5)
+                       poll_result->rc_data = ir->urb_data[0];
+               else
+                       poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8;
        } else {
                tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
                msleep(10);
                tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
                msleep(10);
 
-               rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
-                USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1);
+               if (ir->ir.ir_type == IR_TYPE_RC5) {
+                       rc = tm6000_read_write_usb(dev, USB_DIR_IN |
+                               USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                               REQ_02_GET_IR_CODE, 0, 0, buf, 1);
 
-               msleep(10);
+                       msleep(10);
 
-               dprintk("read data=%02x\n", buf[0]);
-               if (rc < 0)
-                       return rc;
+                       dprintk("read data=%02x\n", buf[0]);
+                       if (rc < 0)
+                               return rc;
 
-               poll_result->rc_data[0] = buf[0];
+                       poll_result->rc_data = buf[0];
+               } else {
+                       rc = tm6000_read_write_usb(dev, USB_DIR_IN |
+                               USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                               REQ_02_GET_IR_CODE, 0, 0, buf, 2);
+
+                       msleep(10);
+
+                       dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
+                       if (rc < 0)
+                               return rc;
+
+                       poll_result->rc_data = buf[0] | buf[1] << 8;
+               }
+               if ((poll_result->rc_data & 0x00ff) != 0xff)
+                       ir->key = 1;
        }
        return 0;
 }
@@ -180,12 +198,11 @@ static void tm6000_ir_handle_key(struct tm6000_IR *ir)
                return;
        }
 
-       dprintk("ir->get_key result data=%02x %02x\n",
-               poll_result.rc_data[0], poll_result.rc_data[1]);
+       dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);
 
-       if (poll_result.rc_data[0] != 0xff && ir->key == 1) {
+       if (ir->key) {
                ir_input_keydown(ir->input->input_dev, &ir->ir,
-                       poll_result.rc_data[0] | poll_result.rc_data[1] << 8);
+                               (u32)poll_result.rc_data);
 
                ir_input_nokey(ir->input->input_dev, &ir->ir);
                ir->key = 0;
index 0142338bcafe7fc0440edbf3c65a12d7f9b0df85..4bdb8362de827cdb5c820e2bcb3ca65f9a9226e7 100644 (file)
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
 
 
-       if (param->u.wpa_associate.wpa_ie &&
-           copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
-           return -EINVAL;
+       if (param->u.wpa_associate.wpa_ie_len) {
+               if (!param->u.wpa_associate.wpa_ie)
+                       return -EINVAL;
+               if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+                       return -EINVAL;
+               if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+                       return -EFAULT;
+       }
 
        if (param->u.wpa_associate.mode == 1)
            pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
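
The wpa_set_associate() fix is the canonical validation order for a user-supplied pointer/length pair: reject a NULL pointer or an oversized length before copying, and distinguish a malformed request (-EINVAL) from a faulting copy (-EFAULT). As a reusable helper, illustrative only:

static int copy_bounded_from_user(void *dst, size_t dst_size,
				  const void __user *src, size_t len)
{
	if (!len)
		return 0;		/* nothing requested  */
	if (!src || len > dst_size)
		return -EINVAL;		/* malformed request  */
	if (copy_from_user(dst, src, len))
		return -EFAULT;		/* bad user mapping   */
	return 0;
}
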
index 7e594449600e004c7f6ba14fa2dce39ebee25d20..9eed5b52d9de22647f6ca7e94791cd97f6e60122 100644 (file)
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
          If you are unsure about this, say N here.
 
 config USB_SUSPEND
-       bool "USB runtime power management (suspend/resume and wakeup)"
+       bool "USB runtime power management (autosuspend) and wakeup"
        depends on USB && PM_RUNTIME
        help
          If you say Y here, you can use driver calls or the sysfs
-         "power/level" file to suspend or resume individual USB
-         peripherals and to enable or disable autosuspend (see
+         "power/control" file to enable or disable autosuspend for
+         individual USB peripherals (see
          Documentation/usb/power-management.txt for more details).
 
          Also, USB "remote wakeup" signaling is supported, whereby some
index f06f5dbc8cdc22fbedfa463c05b18672cbb99516..1e6ccef2cf0cbd9e817e7045dccfb03918efa9d7 100644 (file)
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
 int usb_register_dev(struct usb_interface *intf,
                     struct usb_class_driver *class_driver)
 {
-       int retval = -EINVAL;
+       int retval;
        int minor_base = class_driver->minor_base;
-       int minor = 0;
+       int minor;
        char name[20];
        char *temp;
 
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
         */
        minor_base = 0;
 #endif
-       intf->minor = -1;
-
-       dbg ("looking for a minor, starting at %d", minor_base);
 
        if (class_driver->fops == NULL)
-               goto exit;
+               return -EINVAL;
+       if (intf->minor >= 0)
+               return -EADDRINUSE;
+
+       retval = init_usb_class();
+       if (retval)
+               return retval;
+
+       dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
 
        down_write(&minor_rwsem);
        for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
                        continue;
 
                usb_minors[minor] = class_driver->fops;
-
-               retval = 0;
+               intf->minor = minor;
                break;
        }
        up_write(&minor_rwsem);
-
-       if (retval)
-               goto exit;
-
-       retval = init_usb_class();
-       if (retval)
-               goto exit;
-
-       intf->minor = minor;
+       if (intf->minor < 0)
+               return -EXFULL;
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
-               usb_minors[intf->minor] = NULL;
+               usb_minors[minor] = NULL;
+               intf->minor = -1;
                up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
-exit:
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
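
usb_register_dev() is reordered so every precondition fails fast and intf->minor becomes the single source of truth: it is published inside the writer lock and reset there again if device_create() fails. The allocation core, condensed:

down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
	if (usb_minors[minor])
		continue;			/* slot in use */
	usb_minors[minor] = class_driver->fops;
	intf->minor = minor;			/* publish under the lock */
	break;
}
up_write(&minor_rwsem);
if (intf->minor < 0)
	return -EXFULL;				/* table exhausted */

The hub.c hunk below initializes intf->minor to -1 at interface setup, which is what makes the early -EADDRINUSE test trustworthy.
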
index 844683e503830485147910ff16ca035a90194d27..9f0ce7de0e366fb0066dfb92d6a6403aa5f4a2b3 100644 (file)
@@ -1802,6 +1802,7 @@ free_interfaces:
                intf->dev.groups = usb_interface_groups;
                intf->dev.dma_mask = dev->dev.dma_mask;
                INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+               intf->minor = -1;
                device_initialize(&intf->dev);
                dev_set_name(&intf->dev, "%d-%s:%d.%d",
                        dev->bus->busnum, dev->devpath,
index 58b72d741d9313b1f393a033cd0ed4cb89ad32ac..a1e8d273103f77b2d237a5f68438b289b51bb92a 100644 (file)
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                        ehci->broken_periodic = 1;
                        ehci_info(ehci, "using broken periodic workaround\n");
                }
+               if (pdev->device == 0x0806 || pdev->device == 0x0811
+                               || pdev->device == 0x0829) {
+                       ehci_info(ehci, "disable lpm for langwell/penwell\n");
+                       ehci->has_lpm = 0;
+               }
                break;
        case PCI_VENDOR_ID_TDI:
                if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
index 59dc3d351b60269f3c3872b859c0b39d7fcbe449..5ab5bb89bae3558cfe6f296e37931680afa0e323 100644 (file)
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
                                index, transmit ? 'T' : 'R', cppi_ch);
        cppi_ch->hw_ep = ep;
        cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+       cppi_ch->channel.max_len = 0x7fffffff;
 
        DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
        return &cppi_ch->channel;
index c79a5e30d43735bb285300152ade427fd81dac6a..9e8639d4e862b65a2b23bc27a29fd09ba0b60ec1 100644 (file)
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
 
 static int musb_test_mode_open(struct inode *inode, struct file *file)
 {
-       file->private_data = inode->i_private;
-
        return single_open(file, musb_test_mode_show, inode->i_private);
 }
 
 static ssize_t musb_test_mode_write(struct file *file,
                const char __user *ubuf, size_t count, loff_t *ppos)
 {
-       struct musb             *musb = file->private_data;
+       struct seq_file         *s = file->private_data;
+       struct musb             *musb = s->private;
        u8                      test = 0;
        char                    buf[18];
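
The musb debugfs fix restores the single_open() contract: single_open() itself owns file->private_data (it stores its seq_file there), and the cookie passed as its third argument comes back as seq_file->private. Any ->write method on such a file must therefore take the two-step route:

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;	/* set by single_open() */
	struct musb *musb = s->private;			/* our cookie           */

	/* ... parse ubuf and poke musb ... */
	return count;
}
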
 
index 6fca870e957ed3b5061f19deec76a9472d6d5211..d065e23f123ee755f9e6fe0e661823f48d917727 100644 (file)
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #ifndef        CONFIG_MUSB_PIO_ONLY
        if (is_dma_capable() && musb_ep->dma) {
                struct dma_controller   *c = musb->dma_controller;
+               size_t request_size;
+
+               /* setup DMA, then program endpoint CSR */
+               request_size = min_t(size_t, request->length - request->actual,
+                                       musb_ep->dma->max_len);
 
                use_dma = (request->dma != DMA_ADDR_INVALID);
 
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
 #ifdef CONFIG_USB_INVENTRA_DMA
                {
-                       size_t request_size;
-
-                       /* setup DMA, then program endpoint CSR */
-                       request_size = min_t(size_t, request->length,
-                                               musb_ep->dma->max_len);
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                0,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
                if (!use_dma) {
                        c->channel_release(musb_ep->dma);
                        musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                request->zero,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
 #endif
        }
 #endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                                request->zero = 0;
                        }
 
-                       /* ... or if not, then complete it. */
-                       musb_g_giveback(musb_ep, request, 0);
-
-                       /*
-                        * Kickstart next transfer if appropriate;
-                        * the packet that just completed might not
-                        * be transmitted for hours or days.
-                        * REVISIT for double buffering...
-                        * FIXME revisit for stalls too...
-                        */
-                       musb_ep_select(mbase, epnum);
-                       csr = musb_readw(epio, MUSB_TXCSR);
-                       if (csr & MUSB_TXCSR_FIFONOTEMPTY)
-                               return;
-
-                       request = musb_ep->desc ? next_request(musb_ep) : NULL;
-                       if (!request) {
-                               DBG(4, "%s idle now\n",
-                                       musb_ep->end_point.name);
-                               return;
+                       if (request->actual == request->length) {
+                               musb_g_giveback(musb_ep, request, 0);
+                               request = musb_ep->desc ? next_request(musb_ep) : NULL;
+                               if (!request) {
+                                       DBG(4, "%s idle now\n",
+                                               musb_ep->end_point.name);
+                                       return;
+                               }
                        }
                }
 
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 {
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                fifo_count = 0;
-       u16                     len = musb_ep->packet_sz;
+       u16                     len;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
+
+       len = musb_ep->packet_sz;
 
        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
         */
 
                                csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
                                csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
                                /* csr |= MUSB_RXCSR_DMAMODE; */
 
                                /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                                if (request->actual < request->length) {
                                        int transfer_size = 0;
 #ifdef USE_MODE1
-                                       transfer_size = min(request->length,
+                                       transfer_size = min(request->length - request->actual,
                                                        channel->max_len);
 #else
-                                       transfer_size = len;
+                                       transfer_size = min(request->length - request->actual,
+                                                       (unsigned)len);
 #endif
                                        if (transfer_size <= musb_ep->packet_sz)
                                                musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
        u16                     csr;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
 
        musb_ep_select(mbase, epnum);
 
@@ -1081,7 +1084,7 @@ struct free_record {
 /*
  * Context: controller locked, IRQs blocked.
  */
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
        DBG(3, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
index c8b140325d82bf4bb6cd3ad85216fa24ec308ce7..572b1da7f2dc45ea1fd3d9bb67d3ea273d56cfae 100644 (file)
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
 
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
 #endif         /* __MUSB_GADGET_H */
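
A pattern recurs in the musb gadget hunks above: DMA is (re)programmed from request->dma + request->actual, with the chunk size capped at the bytes still outstanding, so a request completed in several passes never re-sends data or runs past its buffer. A minimal sketch of that bookkeeping, with invented names rather than the driver's real helpers:

    /* Sketch: pick the next DMA chunk of a partially completed
     * transfer; 'length' and 'actual' mirror struct usb_request. */
    struct xfer {
            unsigned int  length;  /* total bytes requested */
            unsigned int  actual;  /* bytes already transferred */
            unsigned long dma;     /* bus address of the buffer */
    };

    static unsigned int next_chunk(const struct xfer *req,
                                   unsigned int max_chunk,
                                   unsigned long *addr)
    {
            unsigned int remaining = req->length - req->actual;
            unsigned int size = remaining < max_chunk ? remaining : max_chunk;

            *addr = req->dma + req->actual;  /* resume past completed bytes */
            return size;                     /* never beyond the buffer end */
    }
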
index 59bef8f3a3585100310bbb43848ea56603a82c28..6dd03f4c5f4956983c2d1bf2552c248f6a1702a0 100644 (file)
@@ -261,6 +261,7 @@ __acquires(musb->lock)
                                        ctrlrequest->wIndex & 0x0f;
                                struct musb_ep          *musb_ep;
                                struct musb_hw_ep       *ep;
+                               struct musb_request     *request;
                                void __iomem            *regs;
                                int                     is_in;
                                u16                     csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
                                        musb_writew(regs, MUSB_RXCSR, csr);
                                }
 
+                               /* Maybe start the first request in the queue */
+                               request = to_musb_request(
+                                               next_request(musb_ep));
+                               if (!musb_ep->busy && request) {
+                                       DBG(3, "restarting the request\n");
+                                       musb_ep_restart(musb, request);
+                               }
+
                                /* select ep0 again */
                                musb_ep_select(mbase, 0);
                                } break;
index 877d20b1dff973fd6975f258bb0b18267df1f5f5..9e65c47cc98b95761daba7186dd180550b954def 100644 (file)
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
 
        qh->segsize = length;
 
+       /*
+        * Ensure the data reaches main memory before starting
+        * the DMA transfer.
+        */
+       wmb();
+
        if (!dma->channel_program(channel, pkt_size, mode,
                        urb->transfer_dma + offset, length)) {
                dma->channel_release(channel);
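
The wmb() added before channel_program() orders the CPU's buffer writes ahead of the store that starts the DMA engine; on a weakly ordered core the controller could otherwise begin fetching before the payload is visible in memory. The same rule in a generic sketch (fill_payload() and the doorbell register are assumptions, not musb API):

    #include <linux/io.h>

    void fill_payload(void *buf, size_t len);    /* hypothetical */

    /* Sketch: payload writes must be globally visible before the
     * doorbell store that lets the device start reading them. */
    static void kick_dma(void __iomem *doorbell, void *buf, size_t len)
    {
            fill_payload(buf, len);  /* CPU fills the buffer */
            wmb();                   /* order buffer writes before the kick */
            writel(1, doorbell);     /* device may fetch the buffer now */
    }
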
index 05aaac1c3861e5be2f8d30c4311e7faa5a2c40a8..0bc97698af157d2cc25e4309a0da34ac2ad4e317 100644 (file)
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
        }
 }
 
-static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
 {
-       u8 pwr;
+       u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+       if (on)
+               pwr &= ~PHY_PWR_PHYPWD;
+       else
+               pwr |= PHY_PWR_PHYPWD;
 
-       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+       WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
+static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
        if (on) {
                regulator_enable(twl->usb3v1);
                regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
                twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
                                                        VUSB_DEDICATED2);
                regulator_enable(twl->usb1v5);
-               pwr &= ~PHY_PWR_PHYPWD;
-               WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+               __twl4030_phy_power(twl, 1);
                twl4030_usb_write(twl, PHY_CLK_CTRL,
                                  twl4030_usb_read(twl, PHY_CLK_CTRL) |
                                        (PHY_CLK_CTRL_CLOCKGATING_EN |
                                                PHY_CLK_CTRL_CLK32K_EN));
-       } else  {
-               pwr |= PHY_PWR_PHYPWD;
-               WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+       } else {
+               __twl4030_phy_power(twl, 0);
                regulator_disable(twl->usb1v5);
                regulator_disable(twl->usb1v8);
                regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
 
        twl4030_phy_power(twl, 0);
        twl->asleep = 1;
+       dev_dbg(twl->dev, "%s\n", __func__);
 }
 
-static void twl4030_phy_resume(struct twl4030_usb *twl)
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
 {
-       if (!twl->asleep)
-               return;
-
        twl4030_phy_power(twl, 1);
        twl4030_i2c_access(twl, 1);
        twl4030_usb_set_mode(twl, twl->usb_mode);
        if (twl->usb_mode == T2_USB_MODE_ULPI)
                twl4030_i2c_access(twl, 0);
+}
+
+static void twl4030_phy_resume(struct twl4030_usb *twl)
+{
+       if (!twl->asleep)
+               return;
+       __twl4030_phy_resume(twl);
        twl->asleep = 0;
+       dev_dbg(twl->dev, "%s\n", __func__);
 }
 
 static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
        twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
        twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
 
-       /* put VUSB3V1 LDO in active state */
-       twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+       /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected */
+       /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
 
        /* input to VUSB3V1 LDO is from VBAT, not VBUS */
        twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+       int status;
+
+       status = twl4030_usb_linkstat(twl);
+       if (status >= 0) {
+               if (status == USB_EVENT_NONE) {
+                       __twl4030_phy_power(twl, 0);
+                       twl->asleep = 1;
+               } else {
+                       __twl4030_phy_resume(twl);
+                       twl->asleep = 0;
+               }
+
+               blocking_notifier_call_chain(&twl->otg.notifier, status,
+                               twl->otg.gadget);
+       }
+       sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
 static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
 {
        struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        struct twl4030_usb_data *pdata = pdev->dev.platform_data;
        struct twl4030_usb      *twl;
        int                     status, err;
-       u8                      pwr;
 
        if (!pdata) {
                dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        twl->otg.set_peripheral = twl4030_set_peripheral;
        twl->otg.set_suspend    = twl4030_set_suspend;
        twl->usb_mode           = pdata->usb_mode;
-
-       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
-
-       twl->asleep             = (pwr & PHY_PWR_PHYPWD);
+       twl->asleep = 1;
 
        /* init spinlock for workqueue */
        spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
-       /* The IRQ handler just handles changes from the previous states
-        * of the ID and VBUS pins ... in probe() we must initialize that
-        * previous state.  The easy way:  fake an IRQ.
-        *
-        * REVISIT:  a real IRQ might have happened already, if PREEMPT is
-        * enabled.  Else the IRQ may not yet be configured or enabled,
-        * because of scheduling delays.
+       /* Power down the PHY, or power it up, according to the
+        * current link state.
         */
-       twl4030_usb_irq(twl->irq, twl);
+       twl4030_usb_phy_init(twl);
 
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
        return 0;
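
Rather than faking an IRQ at probe time, the driver now samples the link state once and powers the PHY accordingly, which sidesteps the preemption race the removed REVISIT comment described. The shape of that initialize-from-hardware pattern, sketched with hypothetical helpers:

    #include <stdbool.h>

    enum link_state { LINK_NONE, LINK_VBUS, LINK_ID };

    struct phy_ctx { bool asleep; };

    /* Hypothetical stand-ins for the driver's register I/O. */
    enum link_state read_link_status(struct phy_ctx *ctx);
    void phy_power_off(struct phy_ctx *ctx);
    void phy_resume(struct phy_ctx *ctx);
    void notify_listeners(struct phy_ctx *ctx, enum link_state s);

    /* Sketch: derive the initial state from the hardware instead
     * of replaying an interrupt that may already have fired. */
    static void phy_init_state(struct phy_ctx *ctx)
    {
            enum link_state link = read_link_status(ctx);

            if (link == LINK_NONE) {
                    phy_power_off(ctx);      /* no cable: sleep now */
                    ctx->asleep = true;
            } else {
                    phy_resume(ctx);         /* cable present: power up */
                    ctx->asleep = false;
            }
            notify_listeners(ctx, link);     /* observers see initial state */
    }
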
index 30922a7e3347494b5ca30b81ea77e53ea767e91e..aa665817a2720414a7e669b20499d62ebca4b6db 100644 (file)
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
 
        case TIOCGICOUNT:
                cnow = mos7720_port->icount;
+
+               memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
index 1c9b6e9b2386e5032da1e3b8a095525222dc8cb5..1a42bc2137995bea0b70905cfe518674242141fd 100644 (file)
@@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
        case TIOCGICOUNT:
                cnow = mos7840_port->icount;
                smp_rmb();
+
+               memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
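
Both mos7720 and mos7840 build a serial_icounter_struct on the stack field by field before copying it to userspace; any field or padding byte the switch misses would leak kernel stack contents. Zeroing the struct first closes the hole. A condensed sketch of the rule, with the ioctl plumbing simplified:

    #include <linux/errno.h>
    #include <linux/serial.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* Sketch: never copy a partially initialized stack struct to
     * userspace -- memset() first so unset fields and padding
     * cannot carry stale kernel data. */
    static int report_counts(struct serial_icounter_struct __user *argp,
                             const struct async_icount *cnow)
    {
            struct serial_icounter_struct icount;

            memset(&icount, 0, sizeof(icount));
            icount.cts = cnow->cts;
            icount.dsr = cnow->dsr;
            icount.rng = cnow->rng;
            /* ... remaining fields as in the drivers ... */

            return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
    }
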
index 29e850a7a2f9871b7c9658e0717f8926dc5bc4ac..17927b1f9334ead1e854ed539dea352f8718c589 100644 (file)
@@ -127,7 +127,10 @@ static void handle_tx(struct vhost_net *net)
        size_t len, total_len = 0;
        int err, wmem;
        size_t hdr_size;
-       struct socket *sock = rcu_dereference(vq->private_data);
+       struct socket *sock;
+
+       sock = rcu_dereference_check(vq->private_data,
+                                    lockdep_is_held(&vq->mutex));
        if (!sock)
                return;
 
@@ -243,7 +246,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
        int r, nlogs = 0;
 
        while (datalen > 0) {
-               if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+               if (unlikely(seg >= VHOST_NET_MAX_SG)) {
                        r = -ENOBUFS;
                        goto err;
                }
@@ -582,7 +585,10 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static void vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
 {
-       struct socket *sock = vq->private_data;
+       struct socket *sock;
+
+       sock = rcu_dereference_protected(vq->private_data,
+                                        lockdep_is_held(&vq->mutex));
        if (!sock)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
@@ -598,7 +604,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
        struct socket *sock;
 
        mutex_lock(&vq->mutex);
-       sock = vq->private_data;
+       sock = rcu_dereference_protected(vq->private_data,
+                                        lockdep_is_held(&vq->mutex));
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
@@ -736,7 +743,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        }
 
        /* start polling new socket */
-       oldsock = vq->private_data;
+       oldsock = rcu_dereference_protected(vq->private_data,
+                                           lockdep_is_held(&vq->mutex));
        if (sock != oldsock) {
                 vhost_net_disable_vq(n, vq);
                 rcu_assign_pointer(vq->private_data, sock);
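
The vhost-net hunks replace bare loads of vq->private_data with lockdep-checked accessors: rcu_dereference_check() where the caller may hold either the RCU read side or the vq mutex, and rcu_dereference_protected() where the mutex alone guarantees the pointer cannot change underneath. A compressed sketch of the split, assuming a mutex-protected __rcu pointer:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct ctx {
            struct mutex lock;
            void __rcu *ptr;    /* readers: RCU; writers: ctx->lock */
    };

    /* Reader callable under rcu_read_lock() OR under the mutex:
     * document both conditions for lockdep. */
    static void *ctx_peek(struct ctx *c)
    {
            return rcu_dereference_check(c->ptr, lockdep_is_held(&c->lock));
    }

    /* Update side: the mutex makes the read stable, so use the
     * cheaper rcu_dereference_protected(). */
    static void *ctx_swap(struct ctx *c, void *newp)
    {
            void *old;

            mutex_lock(&c->lock);
            old = rcu_dereference_protected(c->ptr,
                                            lockdep_is_held(&c->lock));
            rcu_assign_pointer(c->ptr, newp);
            mutex_unlock(&c->lock);
            synchronize_rcu();  /* let readers of 'old' drain first */
            return old;
    }
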
index 4b99117f3ecd209c63571c610f813bd41d50abba..8b5a1b33d0fed906ef6d0873027a3275858e8c2c 100644 (file)
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
        return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+       INIT_LIST_HEAD(&work->node);
+       work->fn = fn;
+       init_waitqueue_head(&work->done);
+       work->flushing = 0;
+       work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev)
 {
-       struct vhost_work *work = &poll->work;
-
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
 
-       INIT_LIST_HEAD(&work->node);
-       work->fn = fn;
-       init_waitqueue_head(&work->done);
-       work->flushing = 0;
-       work->queue_seq = work->done_seq = 0;
+       vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
        remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-       struct vhost_work *work = &poll->work;
        unsigned seq;
        int left;
        int flushing;
 
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        seq = work->queue_seq;
        work->flushing++;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        wait_event(work->done, ({
-                  spin_lock_irq(&poll->dev->work_lock);
+                  spin_lock_irq(&dev->work_lock);
                   left = seq - work->done_seq <= 0;
-                  spin_unlock_irq(&poll->dev->work_lock);
+                  spin_unlock_irq(&dev->work_lock);
                   left;
        }));
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        flushing = --work->flushing;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+       vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+                                   struct vhost_work *work)
 {
-       struct vhost_dev *dev = poll->dev;
-       struct vhost_work *work = &poll->work;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
        spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+       vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
        return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+        struct vhost_work work;
+        struct task_struct *owner;
+        int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+        struct vhost_attach_cgroups_struct *s;
+        s = container_of(work, struct vhost_attach_cgroups_struct, work);
+        s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+        struct vhost_attach_cgroups_struct attach;
+        attach.owner = current;
+        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+        vhost_work_queue(dev, &attach.work);
+        vhost_work_flush(dev, &attach.work);
+        return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
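
vhost_attach_cgroups() is why vhost_work_queue() and vhost_work_flush() were factored out of the poll wrappers: cgroup_attach_task_all() has to run inside the worker thread itself, so the caller queues a one-shot work item carrying the arguments and a return slot, then flushes to wait for it. The run-on-worker-and-wait idiom in a generic sketch (the work/worker primitives here are stand-ins, not the vhost API):

    #include <linux/kernel.h>

    struct worker;                               /* hypothetical thread */
    struct work { void (*fn)(struct work *); };  /* hypothetical node */

    void work_init(struct work *w, void (*fn)(struct work *));
    void work_queue(struct worker *wk, struct work *w);
    void work_flush(struct worker *wk, struct work *w);

    struct sync_call {
            struct work item;       /* embedded work node */
            int (*fn)(void *arg);
            void *arg;
            int ret;
    };

    static void sync_call_fn(struct work *w)
    {
            struct sync_call *s = container_of(w, struct sync_call, item);

            s->ret = s->fn(s->arg); /* executes in the worker's context */
    }

    /* Sketch: run fn(arg) on the worker thread, synchronously. */
    static int run_on_worker(struct worker *wk, int (*fn)(void *), void *arg)
    {
            struct sync_call call = { .fn = fn, .arg = arg };

            work_init(&call.item, sync_call_fn);
            work_queue(wk, &call.item);  /* hand off to the worker */
            work_flush(wk, &call.item);  /* block until it has run */
            return call.ret;             /* flush ordered this store */
    }
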
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
        }
 
        dev->worker = worker;
-       err = cgroup_attach_task_current_cg(worker);
+       wake_up_process(worker);        /* avoid contributing to loadavg */
+
+       err = vhost_attach_cgroups(dev);
        if (err)
                goto err_cgroup;
-       wake_up_process(worker);        /* avoid contributing to loadavg */
 
        return 0;
 err_cgroup:
        kthread_stop(worker);
+       dev->worker = NULL;
 err_worker:
        if (dev->mm)
                mmput(dev->mm);
@@ -284,7 +320,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
        vhost_dev_cleanup(dev);
 
        memory->nregions = 0;
-       dev->memory = memory;
+       RCU_INIT_POINTER(dev->memory, memory);
        return 0;
 }
 
@@ -316,8 +352,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
-       kfree(dev->memory);
-       dev->memory = NULL;
+       kfree(rcu_dereference_protected(dev->memory,
+                                       lockdep_is_held(&dev->mutex)));
+       RCU_INIT_POINTER(dev->memory, NULL);
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
@@ -404,14 +441,22 @@ static int vq_access_ok(unsigned int num,
 /* Caller should have device mutex but not vq mutex */
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
-       return memory_access_ok(dev, dev->memory, 1);
+       struct vhost_memory *mp;
+
+       mp = rcu_dereference_protected(dev->memory,
+                                      lockdep_is_held(&dev->mutex));
+       return memory_access_ok(dev, mp, 1);
 }
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
 static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
 {
-       return vq_memory_access_ok(log_base, vq->dev->memory,
+       struct vhost_memory *mp;
+
+       mp = rcu_dereference_protected(vq->dev->memory,
+                                      lockdep_is_held(&vq->mutex));
+       return vq_memory_access_ok(log_base, mp,
                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                        sizeof *vq->used +
@@ -451,7 +496,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                kfree(newmem);
                return -EFAULT;
        }
-       oldmem = d->memory;
+       oldmem = rcu_dereference_protected(d->memory,
+                                          lockdep_is_held(&d->mutex));
        rcu_assign_pointer(d->memory, newmem);
        synchronize_rcu();
        kfree(oldmem);
@@ -822,11 +868,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                if (r < 0)
                        return r;
                len -= l;
-               if (!len)
+               if (!len) {
+                       if (vq->log_ctx)
+                               eventfd_signal(vq->log_ctx, 1);
                        return 0;
+               }
        }
-       if (vq->log_ctx)
-               eventfd_signal(vq->log_ctx, 1);
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
index afd77295971ce3044117d0d6e5ea8f4e20f655fe..af3c11ded5fd4910ed0dccea161a801298036731 100644 (file)
@@ -106,7 +106,7 @@ struct vhost_virtqueue {
         * vhost_work execution acts instead of rcu_read_lock() and the end of
          * vhost_work execution acts instead of rcu_read_unlock().
         * Writers use virtqueue mutex. */
-       void *private_data;
+       void __rcu *private_data;
        /* Log write descriptors */
        void __user *log_base;
        struct vhost_log log[VHOST_NET_MAX_SG];
@@ -116,7 +116,7 @@ struct vhost_dev {
        /* Readers use RCU to access memory table pointer
         * log base pointer and features.
         * Writers use mutex below.*/
-       struct vhost_memory *memory;
+       struct vhost_memory __rcu *memory;
        struct mm_struct *mm;
        struct mutex mutex;
        unsigned acked_features;
@@ -173,7 +173,11 @@ enum {
 
 static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
 {
-       unsigned acked_features = rcu_dereference(dev->acked_features);
+       unsigned acked_features;
+
+       acked_features =
+               rcu_dereference_index_check(dev->acked_features,
+                                           lockdep_is_held(&dev->mutex));
        return acked_features & (1 << bit);
 }
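
The __rcu markings on private_data and memory are annotations for static analysis rather than code changes: with CONFIG_SPARSE_RCU_POINTER, sparse ('make C=1') warns whenever such a pointer is touched without going through an rcu_* accessor. What that buys, in miniature:

    #include <linux/rcupdate.h>

    struct data;                      /* opaque payload */

    struct cfg {
            struct data __rcu *cur;   /* sparse-checked pointer */
    };

    /* OK: accessors handle the __rcu address space. */
    static struct data *cfg_get(struct cfg *c)
    {
            return rcu_dereference(c->cur);   /* reader under RCU */
    }

    static void cfg_set(struct cfg *c, struct data *d)
    {
            rcu_assign_pointer(c->cur, d);    /* publisher */
    }

    /* A plain 'c->cur' load or store here would draw a sparse
     * warning about dereferencing a __rcu pointer directly. */
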
 
index 84f842331dfae0d7c4b194b42f283fae44e8a6af..7ccc967831f05bca5aba179bb88ff7880f55b92f 100644 (file)
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
        softback_buf = 0UL;
 
        for (i = 0; i < FB_MAX; i++) {
-               int pending;
+               int pending = 0;
 
                mapped = 0;
                info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
                if (info == NULL)
                        continue;
 
-               pending = cancel_work_sync(&info->queue);
+               if (info->queue.func)
+                       pending = cancel_work_sync(&info->queue);
                DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
                        "no"));
 
index 815f84b07933f7b97dc1fab249055531f6cc65b2..70477c2e4b619cd6a2ca5562771e15243cd647a3 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <linux/dmi.h>
-
+#include <linux/pci.h>
 #include <video/vga.h>
 
 static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
        M_I20,          /* 20-Inch iMac */
        M_I20_SR,       /* 20-Inch iMac (Santa Rosa) */
        M_I24,          /* 24-Inch iMac */
+       M_I24_8_1,      /* 24-Inch iMac, gen 8,1 */
+       M_I24_10_1,     /* 24-Inch iMac, gen 10,1 */
+       M_I27_11_1,     /* 27-Inch iMac, gen 11,1 */
        M_MINI,         /* Mac Mini */
+       M_MINI_3_1,     /* Mac Mini, gen 3,1 */
+       M_MINI_4_1,     /* Mac Mini, gen 4,1 */
        M_MB,           /* MacBook */
        M_MB_2,         /* MacBook, 2nd rev. */
        M_MB_3,         /* MacBook, 3rd rev. */
+       M_MB_5_1,       /* MacBook, 5th rev. */
+       M_MB_6_1,       /* MacBook, 6th rev. */
+       M_MB_7_1,       /* MacBook, 7th rev. */
        M_MB_SR,        /* MacBook, 2nd gen, (Santa Rosa) */
        M_MBA,          /* MacBook Air */
        M_MBP,          /* MacBook Pro */
        M_MBP_2,        /* MacBook Pro 2nd gen */
+       M_MBP_2_2,      /* MacBook Pro, gen 2,2 */
        M_MBP_SR,       /* MacBook Pro (Santa Rosa) */
        M_MBP_4,        /* MacBook Pro, 4th gen */
        M_MBP_5_1,      /* MacBook Pro, gen 5,1 */
+       M_MBP_5_2,      /* MacBook Pro, gen 5,2 */
+       M_MBP_5_3,      /* MacBook Pro, gen 5,3 */
+       M_MBP_6_1,      /* MacBook Pro, gen 6,1 */
+       M_MBP_6_2,      /* MacBook Pro, gen 6,2 */
+       M_MBP_7_1,      /* MacBook Pro, gen 7,1 */
        M_UNKNOWN       /* placeholder */
 };
 
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
        [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
        [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
        [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+       [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+       [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+       [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
        [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+       [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+       [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
        [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+       [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
        [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
        [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+       [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
        [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
        [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+       [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+       [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+       [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
        [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
 };
 
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
        /* At least one of these two will be right; maybe both? */
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
        {},
 };
 
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
 {
        struct efifb_dmi_info *info = id->driver_data;
        if (info->base == 0)
-               return -ENODEV;
+               return 0;
 
        printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
                         "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
                         info->stride);
 
        /* Trust the bootloader over the DMI tables */
-       if (screen_info.lfb_base == 0)
+       if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+               struct pci_dev *dev = NULL;
+               int found_bar = 0;
+#endif
                screen_info.lfb_base = info->base;
-       if (screen_info.lfb_linelength == 0)
-               screen_info.lfb_linelength = info->stride;
-       if (screen_info.lfb_width == 0)
-               screen_info.lfb_width = info->width;
-       if (screen_info.lfb_height == 0)
-               screen_info.lfb_height = info->height;
-       if (screen_info.orig_video_isVGA == 0)
-               screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
 
-       return 0;
+#if defined(CONFIG_PCI)
+               /* make sure that the address in the table actually falls
+                * within a VGA device's PCI BAR */
+
+               for_each_pci_dev(dev) {
+                       int i;
+                       if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+                               continue;
+                       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                               resource_size_t start, end;
+
+                               start = pci_resource_start(dev, i);
+                               if (start == 0)
+                                       break;
+                               end = pci_resource_end(dev, i);
+                               if (screen_info.lfb_base >= start &&
+                                               screen_info.lfb_base < end) {
+                                       found_bar = 1;
+                               }
+                       }
+               }
+               if (!found_bar)
+                       screen_info.lfb_base = 0;
+#endif
+       }
+       if (screen_info.lfb_base) {
+               if (screen_info.lfb_linelength == 0)
+                       screen_info.lfb_linelength = info->stride;
+               if (screen_info.lfb_width == 0)
+                       screen_info.lfb_width = info->width;
+               if (screen_info.lfb_height == 0)
+                       screen_info.lfb_height = info->height;
+               if (screen_info.orig_video_isVGA == 0)
+                       screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+       } else {
+               screen_info.lfb_linelength = 0;
+               screen_info.lfb_width = 0;
+               screen_info.lfb_height = 0;
+               screen_info.orig_video_isVGA = 0;
+               return 0;
+       }
+       return 1;
 }
 
 static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
index c91a7f70f7b086f56882ba07f97ba38e314adfd6..a31a77ff6f3d2a3ac2daa56baeb6ae55430f6177 100644 (file)
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
         * Set bit to enable graphics DMA.
         */
        x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
-       x |= fbi->active ? 0x00000100 : 0;
-       fbi->active = 0;
+       x &= ~CFG_GRA_ENA_MASK;
+       x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
 
        /*
         * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
        return ret;
 }
 
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
 {
        struct pxa168fb_mach_info *mi;
        struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
        .probe          = pxa168fb_probe,
 };
 
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
 {
        return platform_driver_register(&pxa168fb_driver);
 }
index 559bf1727a2b8f252a193d6fac276aed56124428..b52f8e4ef1fdbe3cd19c70d4fb282d15f5b99f85 100644 (file)
@@ -1701,6 +1701,9 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                break;
 
           case FBIOGET_VBLANK:
+
+               memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
                sisvbblank.count = 0;
                sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
 
index da03c074e32aad8b909c4b11c57a4c5a9f9d9285..4d553d0b8d7a450b9337a9b4375856c5b304217c 100644 (file)
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
 {
        struct viafb_ioctl_info viainfo;
 
+       memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
        viainfo.viafb_id = VIAID;
        viainfo.vendor_id = PCI_VIA_VENDOR_ID;
 
index b036677df8c445420906c722d00611dedcaf8b0a..24efd8ea41bb04bab6830cfddfd2281881ec9ae7 100644 (file)
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
          here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
 
 config PNX4008_WATCHDOG
-       tristate "PNX4008 Watchdog"
-       depends on ARCH_PNX4008
+       tristate "PNX4008 and LPC32XX Watchdog"
+       depends on ARCH_PNX4008 || ARCH_LPC32XX
        help
           Say Y here to include support for the watchdog timer
-         in the PNX4008 processor.
+         in the PNX4008 or LPC32XX processor.
          This driver can be built as a module by choosing M. The module
          will be called pnx4008_wdt.
 
index 88c83aa5730318b512d86a7a048dc60a9758de46..f31493e65b380cd63fef6d55132dd96e7af32d63 100644 (file)
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
        if (ret) {
                printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
                                                ident.identity, ret);
-               return ret;
+               goto out;
        }
 
        ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
                printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
                                ident.identity,
                                timeout / 1000000, (timeout / 100000) % 10);
-       } else
-               free_irq(1, (void *)user_dog);
+               return 0;
+       }
+       free_irq(1, (void *)user_dog);
+out:
+       unregister_reboot_notifier(&sbwdog_notifier);
+
        return ret;
 }
 
 static void __exit sbwdog_exit(void)
 {
        misc_deregister(&sbwdog_miscdev);
+       free_irq(1, (void *)user_dog);
+       unregister_reboot_notifier(&sbwdog_notifier);
 }
 
 module_init(sbwdog_init);
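
The sbwdog changes make setup and teardown symmetrical: each init failure unwinds exactly what was established before it, in reverse order, and module exit now releases the same IRQ and notifier that init acquired. The canonical goto-unwind shape, with hypothetical setup steps:

    #include <linux/init.h>

    /* Hypothetical acquire/release pairs. */
    int register_notifier(void);
    void unregister_notifier(void);
    int request_interrupt(void);
    void free_interrupt(void);
    int register_device(void);

    /* Sketch: acquire in order, release in reverse on failure. */
    static int __init demo_init(void)
    {
            int ret;

            ret = register_notifier();
            if (ret)
                    return ret;              /* nothing to undo yet */

            ret = request_interrupt();
            if (ret)
                    goto out_notifier;

            ret = register_device();
            if (ret)
                    goto out_irq;

            return 0;

    out_irq:
            free_interrupt();
    out_notifier:
            unregister_notifier();
            return ret;
    }
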
index 458c499c1223c6f73aaab93f9a6bdc7c816de5a2..18cdeb4c4258a67ccdba77fc517596eaa71819b9 100644 (file)
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
        wdt->pdev = pdev;
        mutex_init(&wdt->lock);
 
+       /* make sure that the watchdog is disabled */
+       ts72xx_wdt_stop(wdt);
+
        error = misc_register(&ts72xx_wdt_miscdev);
        if (error) {
                dev_err(&pdev->dev, "failed to register miscdev\n");
index 29bac5118877ef2028a781e6b409e2fe36463a99..d409495876f11b24fabaebc01d57f02224fe459f 100644 (file)
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
 
-       blocking_notifier_chain_register(&xenstore_chain, nb);
+       if (xenstored_ready > 0)
+               ret = nb->notifier_call(nb, 0, NULL);
+       else
+               blocking_notifier_chain_register(&xenstore_chain, nb);
 
        return ret;
 }
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
 void xenbus_probe(struct work_struct *unused)
 {
-       BUG_ON((xenstored_ready <= 0));
+       xenstored_ready = 1;
 
        /* Enumerate devices in xenstore and watch for changes. */
        xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
                        xen_store_evtchn = xen_start_info->store_evtchn;
                        xen_store_mfn = xen_start_info->store_mfn;
                        xen_store_interface = mfn_to_virt(xen_store_mfn);
+                       xenstored_ready = 1;
                }
-               xenstored_ready = 1;
        }
 
        /* Initialize the interface to xenstore. */
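
register_xenstore_notifier() now splits on whether the event already happened: a late subscriber has its callback invoked immediately, since the chain it would join will never be replayed. The deliver-now-or-subscribe pattern in a hedged sketch (names invented; the return-value mixing mirrors the hunk above):

    #include <linux/notifier.h>

    static BLOCKING_NOTIFIER_HEAD(ready_chain);
    static int subsystem_ready;   /* set once the event has fired */

    /* Sketch: assumes callers serialize against the point where
     * subsystem_ready flips, as xenstored_ready does here. */
    int register_ready_notifier(struct notifier_block *nb)
    {
            if (subsystem_ready)
                    return nb->notifier_call(nb, 0, NULL);  /* call now */
            return blocking_notifier_chain_register(&ready_chain, nb);
    }
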
index 16c8a2a98c1bb6b93fbc8634cb1986cf555bad0e..899f168fd19cc4d4aca48e6d26e51aa08b34db59 100644 (file)
@@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
 
        fid = filp->private_data;
        P9_DPRINTK(P9_DEBUG_VFS,
-                       "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid);
+                       "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
+                       inode, filp, fid ? fid->fid : -1);
        filemap_write_and_wait(inode->i_mapping);
-       p9_client_clunk(fid);
+       if (fid)
+               p9_client_clunk(fid);
        return 0;
 }
 
index c7c23eab94403468d161eac3bd254dfde1e27610..9e670d527646fc4abe2be6f0dfc992f4a3178042 100644 (file)
@@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       dentry->d_op = &v9fs_cached_dentry_operations;
+       if (v9ses->cache)
+               dentry->d_op = &v9fs_cached_dentry_operations;
+       else
+               dentry->d_op = &v9fs_dentry_operations;
        d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
@@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
                generic_fillattr(dentry->d_inode, stat);
 
+       p9stat_free(st);
        kfree(st);
        return 0;
 }
@@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
 
        retval = strnlen(buffer, buflen);
 done:
+       p9stat_free(st);
        kfree(st);
        return retval;
 }
@@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
        .unlink = v9fs_vfs_unlink,
        .mkdir = v9fs_vfs_mkdir,
        .rmdir = v9fs_vfs_rmdir,
-       .mknod = v9fs_vfs_mknod_dotl,
+       .mknod = v9fs_vfs_mknod,
        .rename = v9fs_vfs_rename,
        .getattr = v9fs_vfs_getattr,
        .setattr = v9fs_vfs_setattr,
index f9311077de6842091df9f257e3e6d91c641a0622..1d12ba0ed3db52fa55e2e6ff4aa48ade2cae1a88 100644 (file)
@@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
        fid = v9fs_session_init(v9ses, dev_name, data);
        if (IS_ERR(fid)) {
                retval = PTR_ERR(fid);
+               /*
+                * we need to call session_close to tear down some
+                * of the data structures set up by session_init
+                */
                goto close_session;
        }
 
@@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                retval = -ENOMEM;
                goto release_sb;
        }
-
        sb->s_root = root;
 
        if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
                if (IS_ERR(st)) {
                        retval = PTR_ERR(st);
-                       goto clunk_fid;
+                       goto release_sb;
                }
 
                v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                st = p9_client_stat(fid);
                if (IS_ERR(st)) {
                        retval = PTR_ERR(st);
-                       goto clunk_fid;
+                       goto release_sb;
                }
 
                root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
 
        v9fs_fid_add(root, fid);
 
-P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+       P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
        simple_set_mnt(mnt, sb);
        return 0;
 
 clunk_fid:
        p9_client_clunk(fid);
-
 close_session:
        v9fs_session_close(v9ses);
        kfree(v9ses);
        return retval;
-
 release_sb:
+       /*
+        * we will do the session_close and root dentry release
+        * in the call below. But we need to clunk the fid: we haven't
+        * attached it to the dentry, so it won't get clunked
+        * automatically.
+        */
+       p9_client_clunk(fid);
        deactivate_locked_super(sb);
        return retval;
 }
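
The relabeled error paths encode an ownership rule: before the superblock is live, the fid and session must be unwound by hand; once it is, deactivate_locked_super() owns the teardown, and only the fid, never attached to the root dentry, still needs an explicit clunk. The decision in schematic form (helpers hypothetical, signatures loose):

    struct p9fid;
    struct super_block;

    void fid_clunk(struct p9fid *f);
    void session_close(struct p9fid *f);
    void deactivate_locked_super(struct super_block *sb);

    /* Sketch: cleanup depends on who owns what at failure time. */
    static int unwind(struct p9fid *fid, struct super_block *sb,
                      int err, int sb_is_live, int fid_attached)
    {
            if (!sb_is_live) {
                    fid_clunk(fid);          /* no sb yet: by hand */
                    session_close(fid);
                    return err;
            }
            if (!fid_attached)
                    fid_clunk(fid);          /* sb cannot reach it */
            deactivate_locked_super(sb);     /* drops session, dentries */
            return err;
    }
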
index 33c4e7eef470e995246562b774fd88a82ca7f12b..9581ea94d5a146e95ba9533d1a7ac3aba2b83804 100644 (file)
@@ -109,8 +109,8 @@ static void init_once(void *foo)
 {
        struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
-       init_MUTEX(&ei->i_link_lock);
-       init_MUTEX(&ei->i_ext_lock);
+       sema_init(&ei->i_link_lock, 1);
+       sema_init(&ei->i_ext_lock, 1);
        inode_init_once(&ei->vfs_inode);
 }
 
index 3006b5bc33d697a2f773b2d81016fda57ee5ff68..250b0a73c8a8ca92b78c3a0282d2425ce7649dcf 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
         */
        ret = retry(iocb);
 
-       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
+       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+               /*
+                * There's no easy way to restart the syscall since other AIOs
+                * may already be running. Just fail this IO with EINTR.
+                */
+               if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+                            ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
+                       ret = -EINTR;
                aio_complete(iocb, ret, 0);
+       }
 out:
        spin_lock_irq(&ctx->ctx_lock);
 
@@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr,
        if (unlikely(nr < 0))
                return -EINVAL;
 
+       if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
+               nr = LONG_MAX/sizeof(*iocbpp);
+
        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
                return -EFAULT;
 
index f96eff04e11ab4a8b23f7489ee4b0de50e67e152..a6395bdb26aeb13b7b98c74df4f77780c1c95412 100644 (file)
@@ -134,10 +134,6 @@ static int aout_core_dump(struct coredump_params *cprm)
                if (!dump_write(file, dump_start, dump_size))
                        goto end_coredump;
        }
-/* Finally dump the task struct.  Not be used by gdb, but could be useful */
-       set_fs(KERNEL_DS);
-       if (!dump_write(file, current, sizeof(*current)))
-               goto end_coredump;
 end_coredump:
        set_fs(fs);
        return has_dumped;
index a7528b91393676bb1f1affa72f6b78f38206d4c5..fd0cc0bf9a40396a150ad77b69bf5284531d6125 100644 (file)
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = register_binfmt(&misc_format);
+               err = insert_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index 612a5c38d3c1a5fc49e0d0990e19550c27217650..4d0ff5ee27b86bef6d377b9211694941939999a9 100644 (file)
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Allocate kernel buffer for protection data */
        len = sectors * blk_integrity_tuple_size(bi);
-       buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp);
+       buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
        if (unlikely(buf == NULL)) {
                printk(KERN_ERR "could not allocate integrity buffer\n");
-               return -EIO;
+               return -ENOMEM;
        }
 
        end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
index bc87b9c1d27ea8e253f5a1b9b395a287ede4106e..9eb134ea6eb223a45be745c77f9530b80fcdc082 100644 (file)
@@ -1,8 +1,11 @@
 config CEPH_FS
         tristate "Ceph distributed file system (EXPERIMENTAL)"
        depends on INET && EXPERIMENTAL
+       select CEPH_LIB
        select LIBCRC32C
        select CRYPTO_AES
+       select CRYPTO
+       default n
        help
          Choose Y or M here to include support for mounting the
          experimental Ceph distributed file system.  Ceph is an extremely
@@ -13,15 +16,3 @@ config CEPH_FS
 
          If unsure, say N.
 
-config CEPH_FS_PRETTYDEBUG
-       bool "Include file:line in ceph debug output"
-       depends on CEPH_FS
-       default n
-       help
-         If you say Y here, debug output will include a filename and
-         line to aid debugging.  This increases kernel size and slows
-         execution slightly when debug call sites are enabled (e.g.,
-         via CONFIG_DYNAMIC_DEBUG).
-
-         If unsure, say N.
-
index 278e1172600dc3a3d5acba3654c53719d6f38697..9e6c4f2e8ff1f3e2712979d791da9e55fa780982 100644 (file)
@@ -8,15 +8,8 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
 
 ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
        export.o caps.o snap.o xattr.o \
-       messenger.o msgpool.o buffer.o pagelist.o \
-       mds_client.o mdsmap.o \
-       mon_client.o \
-       osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
-       debugfs.o \
-       auth.o auth_none.o \
-       crypto.o armor.o \
-       auth_x.o \
-       ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
+       mds_client.o mdsmap.o strings.o ceph_frag.o \
+       debugfs.o
 
 else
 #Otherwise we were called directly from the command
diff --git a/fs/ceph/README b/fs/ceph/README
deleted file mode 100644 (file)
index 18352fa..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# The following files are shared by (and manually synchronized
-# between) the Ceph userland and kernel client.
-#
-# userland                  kernel
-src/include/ceph_fs.h      fs/ceph/ceph_fs.h
-src/include/ceph_fs.cc     fs/ceph/ceph_fs.c
-src/include/msgr.h         fs/ceph/msgr.h
-src/include/rados.h        fs/ceph/rados.h
-src/include/ceph_strings.cc fs/ceph/ceph_strings.c
-src/include/ceph_frag.h            fs/ceph/ceph_frag.h
-src/include/ceph_frag.cc    fs/ceph/ceph_frag.c
-src/include/ceph_hash.h            fs/ceph/ceph_hash.h
-src/include/ceph_hash.cc    fs/ceph/ceph_hash.c
-src/crush/crush.c          fs/ceph/crush/crush.c
-src/crush/crush.h          fs/ceph/crush/crush.h
-src/crush/mapper.c         fs/ceph/crush/mapper.c
-src/crush/mapper.h         fs/ceph/crush/mapper.h
-src/crush/hash.h           fs/ceph/crush/hash.h
-src/crush/hash.c           fs/ceph/crush/hash.c
index 4cfce1ee31faaf4f2f6aab966acd5d6001753940..51bcc5ce323024a995d300b4a6035ecf8e72e94b 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
@@ -10,7 +10,8 @@
 #include <linux/task_io_accounting_ops.h>
 
 #include "super.h"
-#include "osd_client.h"
+#include "mds_client.h"
+#include <linux/ceph/osd_client.h>
 
 /*
  * Ceph address space ops.
@@ -193,7 +194,8 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 {
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+       struct ceph_osd_client *osdc =
+               &ceph_inode_to_client(inode)->client->osdc;
        int err = 0;
        u64 len = PAGE_CACHE_SIZE;
 
@@ -265,7 +267,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+       struct ceph_osd_client *osdc =
+               &ceph_inode_to_client(inode)->client->osdc;
        int rc = 0;
        struct page **pages;
        loff_t offset;
@@ -365,7 +368,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
-       struct ceph_client *client;
+       struct ceph_fs_client *fsc;
        struct ceph_osd_client *osdc;
        loff_t page_off = page->index << PAGE_CACHE_SHIFT;
        int len = PAGE_CACHE_SIZE;
@@ -383,8 +386,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        }
        inode = page->mapping->host;
        ci = ceph_inode(inode);
-       client = ceph_inode_to_client(inode);
-       osdc = &client->osdc;
+       fsc = ceph_inode_to_client(inode);
+       osdc = &fsc->client->osdc;
 
        /* verify this is a writeable snap context */
        snapc = (void *)page->private;
@@ -411,13 +414,13 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        if (i_size < page_off + len)
                len = i_size - page_off;
 
-       dout("writepage %p page %p index %lu on %llu~%u\n",
-            inode, page, page->index, page_off, len);
+       dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
+            inode, page, page->index, page_off, len, snapc);
 
-       writeback_stat = atomic_long_inc_return(&client->writeback_count);
+       writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
        if (writeback_stat >
-           CONGESTION_ON_THRESH(client->mount_args->congestion_kb))
-               set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
+           CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
+               set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
        set_page_writeback(page);
        err = ceph_osdc_writepages(osdc, ceph_vino(inode),
@@ -496,7 +499,7 @@ static void writepages_finish(struct ceph_osd_request *req,
        struct address_space *mapping = inode->i_mapping;
        __s32 rc = -EIO;
        u64 bytes = 0;
-       struct ceph_client *client = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        long writeback_stat;
        unsigned issued = ceph_caps_issued(ci);
 
@@ -529,10 +532,10 @@ static void writepages_finish(struct ceph_osd_request *req,
                WARN_ON(!PageUptodate(page));
 
                writeback_stat =
-                       atomic_long_dec_return(&client->writeback_count);
+                       atomic_long_dec_return(&fsc->writeback_count);
                if (writeback_stat <
-                   CONGESTION_OFF_THRESH(client->mount_args->congestion_kb))
-                       clear_bdi_congested(&client->backing_dev_info,
+                   CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
+                       clear_bdi_congested(&fsc->backing_dev_info,
                                            BLK_RW_ASYNC);
 
                ceph_put_snap_context((void *)page->private);
@@ -569,13 +572,13 @@ static void writepages_finish(struct ceph_osd_request *req,
  * mempool.  we avoid the mempool if we can because req->r_num_pages
  * may be less than the maximum write size.
  */
-static void alloc_page_vec(struct ceph_client *client,
+static void alloc_page_vec(struct ceph_fs_client *fsc,
                           struct ceph_osd_request *req)
 {
        req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
                               GFP_NOFS);
        if (!req->r_pages) {
-               req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
+               req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
                req->r_pages_from_pool = 1;
                WARN_ON(!req->r_pages);
        }
@@ -590,7 +593,7 @@ static int ceph_writepages_start(struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_client *client;
+       struct ceph_fs_client *fsc;
        pgoff_t index, start, end;
        int range_whole = 0;
        int should_loop = 1;
@@ -617,13 +620,13 @@ static int ceph_writepages_start(struct address_space *mapping,
             wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
             (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
-       client = ceph_inode_to_client(inode);
-       if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
+       fsc = ceph_inode_to_client(inode);
+       if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
                pr_warning("writepage_start %p on forced umount\n", inode);
                return -EIO; /* we're in a forced umount, don't write! */
        }
-       if (client->mount_args->wsize && client->mount_args->wsize < wsize)
-               wsize = client->mount_args->wsize;
+       if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
+               wsize = fsc->mount_options->wsize;
        if (wsize < PAGE_CACHE_SIZE)
                wsize = PAGE_CACHE_SIZE;
        max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
@@ -766,9 +769,10 @@ get_more_pages:
                        /* ok */
                        if (locked_pages == 0) {
                                /* prepare async write request */
-                               offset = page->index << PAGE_CACHE_SHIFT;
+                               offset = (unsigned long long)page->index
+                                       << PAGE_CACHE_SHIFT;
                                len = wsize;
-                               req = ceph_osdc_new_request(&client->osdc,
+                               req = ceph_osdc_new_request(&fsc->client->osdc,
                                            &ci->i_layout,
                                            ceph_vino(inode),
                                            offset, &len,
@@ -781,7 +785,7 @@ get_more_pages:
                                            &inode->i_mtime, true, 1);
                                max_pages = req->r_num_pages;
 
-                               alloc_page_vec(client, req);
+                               alloc_page_vec(fsc, req);
                                req->r_callback = writepages_finish;
                                req->r_inode = inode;
                        }
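
The cast added when computing 'offset' fixes a truncation bug: page->index is an unsigned long, and on a 32-bit kernel shifting it left by PAGE_CACHE_SHIFT wraps for file offsets at or above 4 GB, so the value must be widened to 64 bits before the shift, not after. The difference in miniature, as a host-runnable demo:

    #include <stdio.h>

    int main(void)
    {
            unsigned long index = 0x200000;   /* a page 8 GiB into a file */
            unsigned shift = 12;              /* 4 KiB pages */

            /* Wrong on 32-bit: the shift happens at unsigned long
             * width and wraps before the widening assignment. */
            unsigned long long bad = index << shift;

            /* Right: widen first, then shift in 64 bits. */
            unsigned long long good = (unsigned long long)index << shift;

            /* On an LP64 host both print 0x200000000; on a 32-bit
             * target 'bad' would come out 0. */
            printf("bad=%#llx good=%#llx\n", bad, good);
            return 0;
    }
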
@@ -793,10 +797,10 @@ get_more_pages:
                             inode, page, page->index);
 
                        writeback_stat =
-                              atomic_long_inc_return(&client->writeback_count);
+                              atomic_long_inc_return(&fsc->writeback_count);
                        if (writeback_stat > CONGESTION_ON_THRESH(
-                                   client->mount_args->congestion_kb)) {
-                               set_bdi_congested(&client->backing_dev_info,
+                                   fsc->mount_options->congestion_kb)) {
+                               set_bdi_congested(&fsc->backing_dev_info,
                                                  BLK_RW_ASYNC);
                        }
 
@@ -845,7 +849,7 @@ get_more_pages:
                op->payload_len = cpu_to_le32(len);
                req->r_request->hdr.data_len = cpu_to_le32(len);
 
-               ceph_osdc_start_request(&client->osdc, req, true);
+               ceph_osdc_start_request(&fsc->client->osdc, req, true);
                req = NULL;
 
                /* continue? */
@@ -914,7 +918,7 @@ static int ceph_update_writeable_page(struct file *file,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        loff_t page_off = pos & PAGE_CACHE_MASK;
        int pos_in_page = pos & ~PAGE_CACHE_MASK;
        int end_in_page = pos_in_page + len;
@@ -1052,8 +1056,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
                          struct page *page, void *fsdata)
 {
        struct inode *inode = file->f_dentry->d_inode;
-       struct ceph_client *client = ceph_inode_to_client(inode);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        int check_cap = 0;
 
@@ -1122,7 +1126,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct page *page = vmf->page;
-       struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        loff_t off = page->index << PAGE_CACHE_SHIFT;
        loff_t size, len;
        int ret;
index a2069b6680aed83eb0be0af7c584b4a619ae239a..98ab13e2b71d9d8ebcbf1715da112d78cb0bac17 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -9,8 +9,9 @@
 #include <linux/writeback.h>
 
 #include "super.h"
-#include "decode.h"
-#include "messenger.h"
+#include "mds_client.h"
+#include <linux/ceph/decode.h>
+#include <linux/ceph/messenger.h>
 
 /*
  * Capability management
@@ -287,11 +288,11 @@ void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
        spin_unlock(&mdsc->caps_list_lock);
 }
 
-void ceph_reservation_status(struct ceph_client *client,
+void ceph_reservation_status(struct ceph_fs_client *fsc,
                             int *total, int *avail, int *used, int *reserved,
                             int *min)
 {
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_mds_client *mdsc = fsc->mdsc;
 
        if (total)
                *total = mdsc->caps_total_count;
@@ -399,7 +400,7 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
 {
-       struct ceph_mount_args *ma = mdsc->client->mount_args;
+       struct ceph_mount_options *ma = mdsc->fsc->mount_options;
 
        ci->i_hold_caps_min = round_jiffies(jiffies +
                                            ma->caps_wanted_delay_min * HZ);
@@ -515,7 +516,7 @@ int ceph_add_cap(struct inode *inode,
                 unsigned seq, unsigned mseq, u64 realmino, int flags,
                 struct ceph_cap_reservation *caps_reservation)
 {
-       struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *new_cap = NULL;
        struct ceph_cap *cap;
@@ -814,7 +815,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
-       if (ci->i_rdcache_ref || ci->i_rdcache_gen)
+       if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
@@ -873,7 +874,7 @@ void __ceph_remove_cap(struct ceph_cap *cap)
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_mds_client *mdsc =
-               &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+               ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        int removed = 0;
 
        dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
@@ -1195,10 +1196,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * asynchronously back to the MDS once sync writes complete and dirty
  * data is written out.
  *
+ * Unless @again is true, skip cap_snaps that were already sent to
+ * the MDS (i.e., during this session).
+ *
  * Called under i_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
-                       struct ceph_mds_session **psession)
+                       struct ceph_mds_session **psession,
+                       int again)
                __releases(ci->vfs_inode->i_lock)
                __acquires(ci->vfs_inode->i_lock)
 {
@@ -1206,7 +1211,7 @@ void __ceph_flush_snaps(struct ceph_inode_info *ci,
        int mds;
        struct ceph_cap_snap *capsnap;
        u32 mseq;
-       struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
                                                    session->s_mutex */
        u64 next_follows = 0;  /* keep track of how far we've gotten through the
@@ -1227,7 +1232,7 @@ retry:
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
-                       continue;
+                       break;
 
                /*
                 * if cap writeback already occurred, we should have dropped
@@ -1240,6 +1245,13 @@ retry:
                        dout("no auth cap (migrating?), doing nothing\n");
                        goto out;
                }
+
+               /* only flush each capsnap once */
+               if (!again && !list_empty(&capsnap->flushing_item)) {
+                       dout("already flushed %p, skipping\n", capsnap);
+                       continue;
+               }
+
                mds = ci->i_auth_cap->session->s_mds;
                mseq = ci->i_auth_cap->mseq;
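
The new again argument lets callers distinguish a first flush from a re-kick: unless again is set, cap_snaps already on a session's flushing list are skipped, so each snapshot flush is sent once and only resent when kick_flushing_capsnaps() asks for it after a session reset. A small sketch of that "flush once unless forced" test, using an intrusive list node whose emptiness means "not yet queued" (the types here are illustrative):

    #include <stdbool.h>

    /* Minimal intrusive list node: empty (self-linked) means "not queued". */
    struct list_node {
        struct list_node *next, *prev;
    };

    static bool list_node_empty(const struct list_node *n)
    {
        return n->next == n;
    }

    struct capsnap_like {
        struct list_node flushing_item; /* non-empty once queued to flush */
    };

    /* Flush each item once; only a forced re-kick ("again") resends. */
    static bool should_flush(const struct capsnap_like *c, bool again)
    {
        return again || list_node_empty(&c->flushing_item);
    }
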
 
@@ -1276,8 +1288,8 @@ retry:
                              &session->s_cap_snaps_flushing);
                spin_unlock(&inode->i_lock);
 
-               dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
-                    inode, capsnap, next_follows, capsnap->size);
+               dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
+                    inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1314,7 +1326,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
        struct inode *inode = &ci->vfs_inode;
 
        spin_lock(&inode->i_lock);
-       __ceph_flush_snaps(ci, NULL);
+       __ceph_flush_snaps(ci, NULL, 0);
        spin_unlock(&inode->i_lock);
 }
 
@@ -1325,7 +1337,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
 void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 {
        struct ceph_mds_client *mdsc =
-               &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+               ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
        struct inode *inode = &ci->vfs_inode;
        int was = ci->i_dirty_caps;
        int dirty = 0;
@@ -1367,7 +1379,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
 {
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int flushing;
 
@@ -1405,17 +1417,6 @@ static int __mark_caps_flushing(struct inode *inode,
 /*
  * try to invalidate mapping pages without blocking.
  */
-static int mapping_is_empty(struct address_space *mapping)
-{
-       struct page *page = find_get_page(mapping, 0);
-
-       if (!page)
-               return 1;
-
-       put_page(page);
-       return 0;
-}
-
 static int try_nonblocking_invalidate(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1425,7 +1426,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
        invalidate_mapping_pages(&inode->i_data, 0, -1);
        spin_lock(&inode->i_lock);
 
-       if (mapping_is_empty(&inode->i_data) &&
+       if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
                /* success. */
                dout("try_nonblocking_invalidate %p success\n", inode);
@@ -1451,8 +1452,8 @@ static int try_nonblocking_invalidate(struct inode *inode)
 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                     struct ceph_mds_session *session)
 {
-       struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
        int file_wanted, used;
@@ -1477,7 +1478,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 
        /* flush snaps first time around only */
        if (!list_empty(&ci->i_cap_snaps))
-               __ceph_flush_snaps(ci, &session);
+               __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
        spin_lock(&inode->i_lock);
@@ -1522,7 +1523,7 @@ retry_locked:
         */
        if ((!is_delayed || mdsc->stopping) &&
            ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
-           ci->i_rdcache_gen &&                     /* may have cached pages */
+           inode->i_data.nrpages &&                 /* have cached pages */
            (file_wanted == 0 ||                     /* no open files */
             (revoking & (CEPH_CAP_FILE_CACHE|
                          CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
@@ -1695,7 +1696,7 @@ ack:
 static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
                          unsigned *flush_tid)
 {
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int unlock_session = session ? 0 : 1;
        int flushing = 0;
@@ -1861,7 +1862,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                                       caps_are_flushed(inode, flush_tid));
        } else {
                struct ceph_mds_client *mdsc =
-                       &ceph_sb_to_client(inode->i_sb)->mdsc;
+                       ceph_sb_to_client(inode->i_sb)->mdsc;
 
                spin_lock(&inode->i_lock);
                if (__ceph_caps_dirty(ci))
@@ -1894,7 +1895,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
                             cap, capsnap);
-                       __ceph_flush_snaps(ci, &session);
+                       __ceph_flush_snaps(ci, &session, 1);
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
@@ -2272,7 +2273,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
-       int seq = le32_to_cpu(grant->seq);
+       unsigned seq = le32_to_cpu(grant->seq);
+       unsigned issue_seq = le32_to_cpu(grant->issue_seq);
        int newcaps = le32_to_cpu(grant->caps);
        int issued, implemented, used, wanted, dirty;
        u64 size = le64_to_cpu(grant->size);
@@ -2284,8 +2286,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        int revoked_rdcache = 0;
        int queue_invalidate = 0;
 
-       dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
-            inode, cap, mds, seq, ceph_cap_string(newcaps));
+       dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n",
+            inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps));
        dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
                inode->i_size);
 
@@ -2381,6 +2383,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        }
 
        cap->seq = seq;
+       cap->issue_seq = issue_seq;
 
        /* file layout may have changed */
        ci->i_layout = grant->layout;
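
The grant handler now decodes issue_seq alongside seq from the wire message and stores both on the cap, presumably so later cap messages echo the right issue sequence back to the MDS. Wire fields are little-endian; a portable userspace equivalent of the le32_to_cpu() conversion used above:

    #include <stdint.h>

    /* Portable little-endian 32-bit decode: the job le32_to_cpu() does
     * for grant->seq and grant->issue_seq in the hunk above. */
    static uint32_t le32_decode(const unsigned char *p)
    {
        return  (uint32_t)p[0]        | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }
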
@@ -2452,7 +2455,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        __releases(inode->i_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        unsigned seq = le32_to_cpu(m->seq);
        int dirty = le32_to_cpu(m->dirty);
        int cleaned = 0;
@@ -2700,7 +2703,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                      struct ceph_msg *msg)
 {
        struct ceph_mds_client *mdsc = session->s_mdsc;
-       struct super_block *sb = mdsc->client->sb;
+       struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
@@ -2763,15 +2766,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                if (op == CEPH_CAP_OP_IMPORT)
                        __queue_cap_release(session, vino.ino, cap_id,
                                            mseq, seq);
-
-               /*
-                * send any full release message to try to move things
-                * along for the mds (who clearly thinks we still have this
-                * cap).
-                */
-               ceph_add_cap_releases(mdsc, session);
-               ceph_send_cap_releases(mdsc, session);
-               goto done;
+               goto flush_cap_releases;
        }
 
        /* these will work even if we don't have a cap yet */
@@ -2799,7 +2794,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
                spin_unlock(&inode->i_lock);
-               goto done;
+               goto flush_cap_releases;
        }
 
        /* note that each of these drops i_lock for us */
@@ -2823,6 +2818,17 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                       ceph_cap_op_name(op));
        }
 
+       goto done;
+
+flush_cap_releases:
+       /*
+        * send any full release message to try to move things
+        * along for the mds (who clearly thinks we still have this
+        * cap).
+        */
+       ceph_add_cap_releases(mdsc, session);
+       ceph_send_cap_releases(mdsc, session);
+
 done:
        mutex_unlock(&session->s_mutex);
 done_unlocked:
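
This restructure replaces a duplicated "send the release messages anyway" sequence with a single flush_cap_releases label that both the no-inode and no-cap paths jump to before falling through to the common unlock at done. A minimal standalone sketch of that single-exit shape (the puts() calls stand in for the ceph_*_cap_releases() and unlock steps):

    #include <stdio.h>

    /* Same shape as the handler above: several early-out conditions
     * share one "nudge the peer" path before the common exit. */
    static int handle_msg(int have_inode, int have_cap)
    {
        int ret = 0;

        if (!have_inode)
            goto flush_releases;
        if (!have_cap)
            goto flush_releases;

        ret = 1;                /* normal processing */
        goto done;

    flush_releases:
        puts("send queued cap release messages");
    done:
        puts("unlock session");
        return ret;
    }

Centralizing the exit keeps the two failure paths from drifting apart, which is the point of the restructure.
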
index ab6cf35c40919843e40f29137500e45a8170e5ab..bdce8b1fbd06794d9de7be918c9ab8aab97bcbd2 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * Ceph 'frag' type
  */
-#include "types.h"
+#include <linux/module.h>
+#include <linux/ceph/types.h>
 
 int ceph_frag_compare(__u32 a, __u32 b)
 {
index 6fd8b20a86112c367c788a20c2f134108acc40e8..7ae1b3d55b58a7b70bf55e79a0788f211b90ff3c 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
+
 #include "super.h"
-#include "mds_client.h"
-#include "mon_client.h"
-#include "auth.h"
 
 #ifdef CONFIG_DEBUG_FS
 
-/*
- * Implement /sys/kernel/debug/ceph fun
- *
- * /sys/kernel/debug/ceph/client*  - an instance of the ceph client
- *      .../osdmap      - current osdmap
- *      .../mdsmap      - current mdsmap
- *      .../monmap      - current monmap
- *      .../osdc        - active osd requests
- *      .../mdsc        - active mds requests
- *      .../monc        - mon client state
- *      .../dentry_lru  - dump contents of dentry lru
- *      .../caps        - expose cap (reservation) stats
- *      .../bdi         - symlink to ../../bdi/something
- */
-
-static struct dentry *ceph_debugfs_dir;
-
-static int monmap_show(struct seq_file *s, void *p)
-{
-       int i;
-       struct ceph_client *client = s->private;
-
-       if (client->monc.monmap == NULL)
-               return 0;
-
-       seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
-       for (i = 0; i < client->monc.monmap->num_mon; i++) {
-               struct ceph_entity_inst *inst =
-                       &client->monc.monmap->mon_inst[i];
-
-               seq_printf(s, "\t%s%lld\t%s\n",
-                          ENTITY_NAME(inst->name),
-                          pr_addr(&inst->addr.in_addr));
-       }
-       return 0;
-}
+#include "mds_client.h"
 
 static int mdsmap_show(struct seq_file *s, void *p)
 {
        int i;
-       struct ceph_client *client = s->private;
+       struct ceph_fs_client *fsc = s->private;
 
-       if (client->mdsc.mdsmap == NULL)
+       if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
                return 0;
-       seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch);
-       seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root);
+       seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
+       seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
        seq_printf(s, "session_timeout %d\n",
-                      client->mdsc.mdsmap->m_session_timeout);
+                      fsc->mdsc->mdsmap->m_session_timeout);
        seq_printf(s, "session_autoclose %d\n",
-                      client->mdsc.mdsmap->m_session_autoclose);
-       for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) {
+                      fsc->mdsc->mdsmap->m_session_autoclose);
+       for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
                struct ceph_entity_addr *addr =
-                       &client->mdsc.mdsmap->m_info[i].addr;
-               int state = client->mdsc.mdsmap->m_info[i].state;
+                       &fsc->mdsc->mdsmap->m_info[i].addr;
+               int state = fsc->mdsc->mdsmap->m_info[i].state;
 
-               seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr),
+               seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
+                              ceph_pr_addr(&addr->in_addr),
                               ceph_mds_state_name(state));
        }
        return 0;
 }
 
-static int osdmap_show(struct seq_file *s, void *p)
-{
-       int i;
-       struct ceph_client *client = s->private;
-       struct rb_node *n;
-
-       if (client->osdc.osdmap == NULL)
-               return 0;
-       seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
-       seq_printf(s, "flags%s%s\n",
-                  (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
-                  " NEARFULL" : "",
-                  (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
-                  " FULL" : "");
-       for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
-               struct ceph_pg_pool_info *pool =
-                       rb_entry(n, struct ceph_pg_pool_info, node);
-               seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
-                          pool->id, pool->v.pg_num, pool->pg_num_mask,
-                          pool->v.lpg_num, pool->lpg_num_mask);
-       }
-       for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
-               struct ceph_entity_addr *addr =
-                       &client->osdc.osdmap->osd_addr[i];
-               int state = client->osdc.osdmap->osd_state[i];
-               char sb[64];
-
-               seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
-                          i, pr_addr(&addr->in_addr),
-                          ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
-                          ceph_osdmap_state_str(sb, sizeof(sb), state));
-       }
-       return 0;
-}
-
-static int monc_show(struct seq_file *s, void *p)
-{
-       struct ceph_client *client = s->private;
-       struct ceph_mon_generic_request *req;
-       struct ceph_mon_client *monc = &client->monc;
-       struct rb_node *rp;
-
-       mutex_lock(&monc->mutex);
-
-       if (monc->have_mdsmap)
-               seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
-       if (monc->have_osdmap)
-               seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
-       if (monc->want_next_osdmap)
-               seq_printf(s, "want next osdmap\n");
-
-       for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
-               __u16 op;
-               req = rb_entry(rp, struct ceph_mon_generic_request, node);
-               op = le16_to_cpu(req->request->hdr.type);
-               if (op == CEPH_MSG_STATFS)
-                       seq_printf(s, "%lld statfs\n", req->tid);
-               else
-                       seq_printf(s, "%lld unknown\n", req->tid);
-       }
-
-       mutex_unlock(&monc->mutex);
-       return 0;
-}
-
+/*
+ * mdsc debugfs
+ */
 static int mdsc_show(struct seq_file *s, void *p)
 {
-       struct ceph_client *client = s->private;
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = s->private;
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct rb_node *rp;
        int pathlen;
@@ -214,61 +120,12 @@ static int mdsc_show(struct seq_file *s, void *p)
        return 0;
 }
 
-static int osdc_show(struct seq_file *s, void *pp)
-{
-       struct ceph_client *client = s->private;
-       struct ceph_osd_client *osdc = &client->osdc;
-       struct rb_node *p;
-
-       mutex_lock(&osdc->request_mutex);
-       for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
-               struct ceph_osd_request *req;
-               struct ceph_osd_request_head *head;
-               struct ceph_osd_op *op;
-               int num_ops;
-               int opcode, olen;
-               int i;
-
-               req = rb_entry(p, struct ceph_osd_request, r_node);
-
-               seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
-                          req->r_osd ? req->r_osd->o_osd : -1,
-                          le32_to_cpu(req->r_pgid.pool),
-                          le16_to_cpu(req->r_pgid.ps));
-
-               head = req->r_request->front.iov_base;
-               op = (void *)(head + 1);
-
-               num_ops = le16_to_cpu(head->num_ops);
-               olen = le32_to_cpu(head->object_len);
-               seq_printf(s, "%.*s", olen,
-                          (const char *)(head->ops + num_ops));
-
-               if (req->r_reassert_version.epoch)
-                       seq_printf(s, "\t%u'%llu",
-                          (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
-                          le64_to_cpu(req->r_reassert_version.version));
-               else
-                       seq_printf(s, "\t");
-
-               for (i = 0; i < num_ops; i++) {
-                       opcode = le16_to_cpu(op->op);
-                       seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
-                       op++;
-               }
-
-               seq_printf(s, "\n");
-       }
-       mutex_unlock(&osdc->request_mutex);
-       return 0;
-}
-
 static int caps_show(struct seq_file *s, void *p)
 {
-       struct ceph_client *client = s->private;
+       struct ceph_fs_client *fsc = s->private;
        int total, avail, used, reserved, min;
 
-       ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
+       ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
        seq_printf(s, "total\t\t%d\n"
                   "avail\t\t%d\n"
                   "used\t\t%d\n"
@@ -280,8 +137,8 @@ static int caps_show(struct seq_file *s, void *p)
 
 static int dentry_lru_show(struct seq_file *s, void *ptr)
 {
-       struct ceph_client *client = s->private;
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = s->private;
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_dentry_info *di;
 
        spin_lock(&mdsc->dentry_lru_lock);
@@ -295,199 +152,124 @@ static int dentry_lru_show(struct seq_file *s, void *ptr)
        return 0;
 }
 
-#define DEFINE_SHOW_FUNC(name)                                         \
-static int name##_open(struct inode *inode, struct file *file)         \
-{                                                                      \
-       struct seq_file *sf;                                            \
-       int ret;                                                        \
-                                                                       \
-       ret = single_open(file, name, NULL);                            \
-       sf = file->private_data;                                        \
-       sf->private = inode->i_private;                                 \
-       return ret;                                                     \
-}                                                                      \
-                                                                       \
-static const struct file_operations name##_fops = {                    \
-       .open           = name##_open,                                  \
-       .read           = seq_read,                                     \
-       .llseek         = seq_lseek,                                    \
-       .release        = single_release,                               \
-};
-
-DEFINE_SHOW_FUNC(monmap_show)
-DEFINE_SHOW_FUNC(mdsmap_show)
-DEFINE_SHOW_FUNC(osdmap_show)
-DEFINE_SHOW_FUNC(monc_show)
-DEFINE_SHOW_FUNC(mdsc_show)
-DEFINE_SHOW_FUNC(osdc_show)
-DEFINE_SHOW_FUNC(dentry_lru_show)
-DEFINE_SHOW_FUNC(caps_show)
+CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
+CEPH_DEFINE_SHOW_FUNC(mdsc_show)
+CEPH_DEFINE_SHOW_FUNC(caps_show)
+CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
+
 
+/*
+ * debugfs
+ */
 static int congestion_kb_set(void *data, u64 val)
 {
-       struct ceph_client *client = (struct ceph_client *)data;
-
-       if (client)
-               client->mount_args->congestion_kb = (int)val;
+       struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
 
+       fsc->mount_options->congestion_kb = (int)val;
        return 0;
 }
 
 static int congestion_kb_get(void *data, u64 *val)
 {
-       struct ceph_client *client = (struct ceph_client *)data;
-
-       if (client)
-               *val = (u64)client->mount_args->congestion_kb;
+       struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
 
+       *val = (u64)fsc->mount_options->congestion_kb;
        return 0;
 }
 
-
 DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
                        congestion_kb_set, "%llu\n");
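
DEFINE_SIMPLE_ATTRIBUTE is the kernel's shorthand for exposing one u64 through paired get/set callbacks plus a printf-style format, which is how writeback_congestion_kb becomes readable and writable below. A userspace sketch of the same get/set-plus-format idea (struct and function names here are made up for illustration, not the macro's actual expansion):

    #include <stdint.h>
    #include <stdio.h>

    /* One u64 exposed through paired get/set callbacks plus a printf
     * format string: the shape of the simple-attribute pattern. */
    struct simple_attr_like {
        int (*get)(void *data, uint64_t *val);
        int (*set)(void *data, uint64_t val);
        const char *fmt;
        void *data;
    };

    static int attr_show(const struct simple_attr_like *a)
    {
        uint64_t v;

        if (a->get(a->data, &v))
            return -1;
        return printf(a->fmt, (unsigned long long)v);
    }

    static int attr_store(const struct simple_attr_like *a, uint64_t v)
    {
        return a->set(a->data, v);
    }
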
 
-int __init ceph_debugfs_init(void)
-{
-       ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
-       if (!ceph_debugfs_dir)
-               return -ENOMEM;
-       return 0;
-}
 
-void ceph_debugfs_cleanup(void)
+void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
 {
-       debugfs_remove(ceph_debugfs_dir);
+       dout("ceph_fs_debugfs_cleanup\n");
+       debugfs_remove(fsc->debugfs_bdi);
+       debugfs_remove(fsc->debugfs_congestion_kb);
+       debugfs_remove(fsc->debugfs_mdsmap);
+       debugfs_remove(fsc->debugfs_caps);
+       debugfs_remove(fsc->debugfs_mdsc);
+       debugfs_remove(fsc->debugfs_dentry_lru);
 }
 
-int ceph_debugfs_client_init(struct ceph_client *client)
+int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
 {
-       int ret = 0;
-       char name[80];
-
-       snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
-                client->monc.auth->global_id);
+       char name[100];
+       int err = -ENOMEM;
 
-       client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
-       if (!client->debugfs_dir)
-               goto out;
-
-       client->monc.debugfs_file = debugfs_create_file("monc",
-                                                     0600,
-                                                     client->debugfs_dir,
-                                                     client,
-                                                     &monc_show_fops);
-       if (!client->monc.debugfs_file)
+       dout("ceph_fs_debugfs_init\n");
+       fsc->debugfs_congestion_kb =
+               debugfs_create_file("writeback_congestion_kb",
+                                   0600,
+                                   fsc->client->debugfs_dir,
+                                   fsc,
+                                   &congestion_kb_fops);
+       if (!fsc->debugfs_congestion_kb)
                goto out;
 
-       client->mdsc.debugfs_file = debugfs_create_file("mdsc",
-                                                     0600,
-                                                     client->debugfs_dir,
-                                                     client,
-                                                     &mdsc_show_fops);
-       if (!client->mdsc.debugfs_file)
-               goto out;
+       dout("a\n");
 
-       client->osdc.debugfs_file = debugfs_create_file("osdc",
-                                                     0600,
-                                                     client->debugfs_dir,
-                                                     client,
-                                                     &osdc_show_fops);
-       if (!client->osdc.debugfs_file)
+       snprintf(name, sizeof(name), "../../bdi/%s",
+                dev_name(fsc->backing_dev_info.dev));
+       fsc->debugfs_bdi =
+               debugfs_create_symlink("bdi",
+                                      fsc->client->debugfs_dir,
+                                      name);
+       if (!fsc->debugfs_bdi)
                goto out;
 
-       client->debugfs_monmap = debugfs_create_file("monmap",
+       dout("b\n");
+       fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
                                        0600,
-                                       client->debugfs_dir,
-                                       client,
-                                       &monmap_show_fops);
-       if (!client->debugfs_monmap)
-               goto out;
-
-       client->debugfs_mdsmap = debugfs_create_file("mdsmap",
-                                       0600,
-                                       client->debugfs_dir,
-                                       client,
+                                       fsc->client->debugfs_dir,
+                                       fsc,
                                        &mdsmap_show_fops);
-       if (!client->debugfs_mdsmap)
-               goto out;
-
-       client->debugfs_osdmap = debugfs_create_file("osdmap",
-                                       0600,
-                                       client->debugfs_dir,
-                                       client,
-                                       &osdmap_show_fops);
-       if (!client->debugfs_osdmap)
+       if (!fsc->debugfs_mdsmap)
                goto out;
 
-       client->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
-                                       0600,
-                                       client->debugfs_dir,
-                                       client,
-                                       &dentry_lru_show_fops);
-       if (!client->debugfs_dentry_lru)
+       dout("ca\n");
+       fsc->debugfs_mdsc = debugfs_create_file("mdsc",
+                                               0600,
+                                               fsc->client->debugfs_dir,
+                                               fsc,
+                                               &mdsc_show_fops);
+       if (!fsc->debugfs_mdsc)
                goto out;
 
-       client->debugfs_caps = debugfs_create_file("caps",
+       dout("da\n");
+       fsc->debugfs_caps = debugfs_create_file("caps",
                                                   0400,
-                                                  client->debugfs_dir,
-                                                  client,
+                                                  fsc->client->debugfs_dir,
+                                                  fsc,
                                                   &caps_show_fops);
-       if (!client->debugfs_caps)
+       if (!fsc->debugfs_caps)
                goto out;
 
-       client->debugfs_congestion_kb =
-               debugfs_create_file("writeback_congestion_kb",
-                                   0600,
-                                   client->debugfs_dir,
-                                   client,
-                                   &congestion_kb_fops);
-       if (!client->debugfs_congestion_kb)
+       dout("ea\n");
+       fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
+                                       0600,
+                                       fsc->client->debugfs_dir,
+                                       fsc,
+                                       &dentry_lru_show_fops);
+       if (!fsc->debugfs_dentry_lru)
                goto out;
 
-       sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev));
-       client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir,
-                                                    name);
-
        return 0;
 
 out:
-       ceph_debugfs_client_cleanup(client);
-       return ret;
+       ceph_fs_debugfs_cleanup(fsc);
+       return err;
 }
 
-void ceph_debugfs_client_cleanup(struct ceph_client *client)
-{
-       debugfs_remove(client->debugfs_bdi);
-       debugfs_remove(client->debugfs_caps);
-       debugfs_remove(client->debugfs_dentry_lru);
-       debugfs_remove(client->debugfs_osdmap);
-       debugfs_remove(client->debugfs_mdsmap);
-       debugfs_remove(client->debugfs_monmap);
-       debugfs_remove(client->osdc.debugfs_file);
-       debugfs_remove(client->mdsc.debugfs_file);
-       debugfs_remove(client->monc.debugfs_file);
-       debugfs_remove(client->debugfs_congestion_kb);
-       debugfs_remove(client->debugfs_dir);
-}
 
 #else  /* CONFIG_DEBUG_FS */
 
-int __init ceph_debugfs_init(void)
-{
-       return 0;
-}
-
-void ceph_debugfs_cleanup(void)
-{
-}
-
-int ceph_debugfs_client_init(struct ceph_client *client)
+int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
 {
        return 0;
 }
 
-void ceph_debugfs_client_cleanup(struct ceph_client *client)
+void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
 {
 }
 
index 6e4f43ff23ec587050eab1b0e735e8d519827c85..e0a2dc6fcafcb62266909c5ec71329e58083d1d7 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/spinlock.h>
 #include <linux/fs_struct.h>
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 
 #include "super.h"
+#include "mds_client.h"
 
 /*
  * Directory operations: readdir, lookup, create, link, unlink,
@@ -94,10 +95,7 @@ static unsigned fpos_off(loff_t p)
  */
 static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
-               __releases(inode->i_lock)
-               __acquires(inode->i_lock)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
@@ -153,7 +151,6 @@ more:
 
        atomic_inc(&dentry->d_count);
        spin_unlock(&dcache_lock);
-       spin_unlock(&inode->i_lock);
 
        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -171,35 +168,30 @@ more:
                } else {
                        dput(last);
                }
-               last = NULL;
        }
-
-       spin_lock(&inode->i_lock);
-       spin_lock(&dcache_lock);
-
        last = dentry;
 
        if (err < 0)
-               goto out_unlock;
+               goto out;
 
-       p = p->prev;
        filp->f_pos++;
 
        /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
-       if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
-               goto more;
-       dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
-       err = -EAGAIN;
+       if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
+               dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
+               err = -EAGAIN;
+               goto out;
+       }
+
+       spin_lock(&dcache_lock);
+       p = p->prev;    /* advance to next dentry */
+       goto more;
 
 out_unlock:
        spin_unlock(&dcache_lock);
-
-       if (last) {
-               spin_unlock(&inode->i_lock);
+out:
+       if (last)
                dput(last);
-               spin_lock(&inode->i_lock);
-       }
-
        return err;
 }
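
The thrust of the __dcache_readdir() rework above is lock ordering: the function no longer runs under i_lock (the caller now drops it first, as the next hunk shows), and it releases dcache_lock around the filldir callback, which may fault or sleep, pinning the current dentry with a reference across the unlocked region. A userspace sketch of that pin, drop, call back, relock pattern with a POSIX spinlock (the lock must be pthread_spin_init()ed elsewhere; names are illustrative):

    #include <pthread.h>

    static pthread_spinlock_t lock;

    /* Never call out with a spinlock held: copy ("pin") what you need,
     * drop the lock around the callback, then relock to advance. */
    static void walk(const int *items, int n, void (*cb)(int))
    {
        pthread_spin_lock(&lock);
        for (int i = 0; i < n; i++) {
            int item = items[i];            /* "pin": take a copy */

            pthread_spin_unlock(&lock);
            cb(item);                       /* may block safely now */
            pthread_spin_lock(&lock);
        }
        pthread_spin_unlock(&lock);
    }
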
 
@@ -227,15 +219,15 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_client *client = ceph_inode_to_client(inode);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
-       const int max_entries = client->mount_args->max_readdir;
-       const int max_bytes = client->mount_args->max_readdir_bytes;
+       const int max_entries = fsc->mount_options->max_readdir;
+       const int max_bytes = fsc->mount_options->max_readdir_bytes;
 
        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
@@ -267,17 +259,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
-           !ceph_test_opt(client, NOASYNCREADDIR) &&
+           !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
+               spin_unlock(&inode->i_lock);
                err = __dcache_readdir(filp, dirent, filldir);
-               if (err != -EAGAIN) {
-                       spin_unlock(&inode->i_lock);
+               if (err != -EAGAIN)
                        return err;
-               }
+       } else {
+               spin_unlock(&inode->i_lock);
        }
-       spin_unlock(&inode->i_lock);
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
@@ -487,14 +479,13 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
 {
-       struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;
 
        /* .snap dir? */
        if (err == -ENOENT &&
-           ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
            strcmp(dentry->d_name.name,
-                  client->mount_args->snapdir_name) == 0) {
+                  fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
@@ -539,8 +530,8 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;
@@ -572,7 +563,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
-                           client->mount_args->snapdir_name,
+                           fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
@@ -629,8 +620,8 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;
 
@@ -685,8 +676,8 @@ static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                            const char *dest)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;
 
@@ -716,8 +707,8 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
 
 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;
@@ -758,8 +749,8 @@ out:
 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;
 
@@ -813,8 +804,8 @@ static int drop_caps_for_unlink(struct inode *inode)
  */
 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
@@ -854,8 +845,8 @@ out:
 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
 {
-       struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;
 
@@ -1021,11 +1012,15 @@ out_touch:
 static void ceph_dentry_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
-       struct inode *parent_inode = dentry->d_parent->d_inode;
-       u64 snapid = ceph_snap(parent_inode);
+       struct inode *parent_inode = NULL;
+       u64 snapid = CEPH_NOSNAP;
 
+       if (!IS_ROOT(dentry)) {
+               parent_inode = dentry->d_parent->d_inode;
+               if (parent_inode)
+                       snapid = ceph_snap(parent_inode);
+       }
        dout("dentry_release %p parent %p\n", dentry, parent_inode);
-
        if (parent_inode && snapid != CEPH_SNAPDIR) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);
 
@@ -1072,7 +1067,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
 
-       if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
+       if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;
 
        if (!cf->dir_info) {
@@ -1173,7 +1168,7 @@ void ceph_dentry_lru_add(struct dentry *dn)
        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
-               mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+               mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_add_tail(&di->lru, &mdsc->dentry_lru);
                mdsc->num_dentry++;
@@ -1189,7 +1184,7 @@ void ceph_dentry_lru_touch(struct dentry *dn)
        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        if (di) {
-               mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+               mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_move_tail(&di->lru, &mdsc->dentry_lru);
                spin_unlock(&mdsc->dentry_lru_lock);
@@ -1204,7 +1199,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
-               mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+               mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_del_init(&di->lru);
                mdsc->num_dentry--;
index 4480cb1c63e7c69b107628481388cf1e35f49b8d..2297d9426992b0132b991e0d4e129982c4ab4a01 100644 (file)
@@ -1,10 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/exportfs.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
 #include "super.h"
+#include "mds_client.h"
 
 /*
  * NFS export support
@@ -42,32 +43,37 @@ struct ceph_nfs_confh {
 static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
                          int connectable)
 {
+       int type;
        struct ceph_nfs_fh *fh = (void *)rawfh;
        struct ceph_nfs_confh *cfh = (void *)rawfh;
        struct dentry *parent = dentry->d_parent;
        struct inode *inode = dentry->d_inode;
-       int type;
+       int connected_handle_length = sizeof(*cfh)/4;
+       int handle_length = sizeof(*fh)/4;
 
        /* don't re-export snaps */
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EINVAL;
 
-       if (*max_len >= sizeof(*cfh)) {
+       if (*max_len >= connected_handle_length) {
                dout("encode_fh %p connectable\n", dentry);
                cfh->ino = ceph_ino(dentry->d_inode);
                cfh->parent_ino = ceph_ino(parent->d_inode);
                cfh->parent_name_hash = parent->d_name.hash;
-               *max_len = sizeof(*cfh);
+               *max_len = connected_handle_length;
                type = 2;
-       } else if (*max_len > sizeof(*fh)) {
-               if (connectable)
-                       return -ENOSPC;
+       } else if (*max_len >= handle_length) {
+               if (connectable) {
+                       *max_len = connected_handle_length;
+                       return 255;
+               }
                dout("encode_fh %p\n", dentry);
                fh->ino = ceph_ino(dentry->d_inode);
-               *max_len = sizeof(*fh);
+               *max_len = handle_length;
                type = 1;
        } else {
-               return -ENOSPC;
+               *max_len = handle_length;
+               return 255;
        }
        return type;
 }
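
Two fixes land in encode_fh() above: handle lengths are consistently measured in 4-byte words (sizeof(*fh)/4), and a too-small buffer now reports the size it needs via *max_len and returns 255 rather than -ENOSPC. A compact sketch of that sizing logic (field values and struct layouts are placeholders):

    #include <stdint.h>

    struct plain_fh { uint64_t ino; };
    struct conn_fh  { uint64_t ino, parent_ino; uint32_t parent_hash; };

    /* Lengths are in 4-byte words; 255 stands in for the "handle does
     * not fit" convention, with the needed size passed back through
     * *max_len so the caller can retry with a bigger buffer. */
    static int encode_fh_sketch(uint32_t *rawfh, int *max_len, int connectable)
    {
        int plain_len = sizeof(struct plain_fh) / 4;
        int conn_len  = sizeof(struct conn_fh) / 4;

        if (*max_len >= conn_len) {
            struct conn_fh *cfh = (struct conn_fh *)rawfh;

            cfh->ino = 1; cfh->parent_ino = 2; cfh->parent_hash = 3;
            *max_len = conn_len;
            return 2;               /* connectable handle type */
        }
        if (*max_len >= plain_len && !connectable) {
            struct plain_fh *fh = (struct plain_fh *)rawfh;

            fh->ino = 1;
            *max_len = plain_len;
            return 1;               /* plain handle type */
        }
        *max_len = connectable ? conn_len : plain_len;
        return 255;
    }
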
@@ -115,7 +121,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
 static struct dentry *__cfh_to_dentry(struct super_block *sb,
                                      struct ceph_nfs_confh *cfh)
 {
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
        struct inode *inode;
        struct dentry *dentry;
        struct ceph_vino vino;
index 8c044a4f045751c62420e67664705f981efe238a..e77c28cf369059112dd06ee3b7c78c5146f0ffdc 100644 (file)
@@ -1,5 +1,6 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/file.h>
@@ -38,8 +39,8 @@
 static struct ceph_mds_request *
 prepare_open_request(struct super_block *sb, int flags, int create_mode)
 {
-       struct ceph_client *client = ceph_sb_to_client(sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
@@ -117,8 +118,8 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 int ceph_open(struct inode *inode, struct file *file)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
@@ -216,8 +217,8 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                                struct nameidata *nd, int mode,
                                int locked_dir)
 {
-       struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct file *file = nd->intent.open.file;
        struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
        struct ceph_mds_request *req;
@@ -269,163 +270,6 @@ int ceph_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-/*
- * build a vector of user pages
- */
-static struct page **get_direct_page_vector(const char __user *data,
-                                           int num_pages,
-                                           loff_t off, size_t len)
-{
-       struct page **pages;
-       int rc;
-
-       pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
-       if (!pages)
-               return ERR_PTR(-ENOMEM);
-
-       down_read(&current->mm->mmap_sem);
-       rc = get_user_pages(current, current->mm, (unsigned long)data,
-                           num_pages, 0, 0, pages, NULL);
-       up_read(&current->mm->mmap_sem);
-       if (rc < 0)
-               goto fail;
-       return pages;
-
-fail:
-       kfree(pages);
-       return ERR_PTR(rc);
-}
-
-static void put_page_vector(struct page **pages, int num_pages)
-{
-       int i;
-
-       for (i = 0; i < num_pages; i++)
-               put_page(pages[i]);
-       kfree(pages);
-}
-
-void ceph_release_page_vector(struct page **pages, int num_pages)
-{
-       int i;
-
-       for (i = 0; i < num_pages; i++)
-               __free_pages(pages[i], 0);
-       kfree(pages);
-}
-
-/*
- * allocate a vector new pages
- */
-static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
-{
-       struct page **pages;
-       int i;
-
-       pages = kmalloc(sizeof(*pages) * num_pages, flags);
-       if (!pages)
-               return ERR_PTR(-ENOMEM);
-       for (i = 0; i < num_pages; i++) {
-               pages[i] = __page_cache_alloc(flags);
-               if (pages[i] == NULL) {
-                       ceph_release_page_vector(pages, i);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       return pages;
-}
-
-/*
- * copy user data into a page vector
- */
-static int copy_user_to_page_vector(struct page **pages,
-                                   const char __user *data,
-                                   loff_t off, size_t len)
-{
-       int i = 0;
-       int po = off & ~PAGE_CACHE_MASK;
-       int left = len;
-       int l, bad;
-
-       while (left > 0) {
-               l = min_t(int, PAGE_CACHE_SIZE-po, left);
-               bad = copy_from_user(page_address(pages[i]) + po, data, l);
-               if (bad == l)
-                       return -EFAULT;
-               data += l - bad;
-               left -= l - bad;
-               po += l - bad;
-               if (po == PAGE_CACHE_SIZE) {
-                       po = 0;
-                       i++;
-               }
-       }
-       return len;
-}
-
-/*
- * copy user data from a page vector into a user pointer
- */
-static int copy_page_vector_to_user(struct page **pages, char __user *data,
-                                   loff_t off, size_t len)
-{
-       int i = 0;
-       int po = off & ~PAGE_CACHE_MASK;
-       int left = len;
-       int l, bad;
-
-       while (left > 0) {
-               l = min_t(int, left, PAGE_CACHE_SIZE-po);
-               bad = copy_to_user(data, page_address(pages[i]) + po, l);
-               if (bad == l)
-                       return -EFAULT;
-               data += l - bad;
-               left -= l - bad;
-               if (po) {
-                       po += l - bad;
-                       if (po == PAGE_CACHE_SIZE)
-                               po = 0;
-               }
-               i++;
-       }
-       return len;
-}
-
-/*
- * Zero an extent within a page vector.  Offset is relative to the
- * start of the first page.
- */
-static void zero_page_vector_range(int off, int len, struct page **pages)
-{
-       int i = off >> PAGE_CACHE_SHIFT;
-
-       off &= ~PAGE_CACHE_MASK;
-
-       dout("zero_page_vector_page %u~%u\n", off, len);
-
-       /* leading partial page? */
-       if (off) {
-               int end = min((int)PAGE_CACHE_SIZE, off + len);
-               dout("zeroing %d %p head from %d\n", i, pages[i],
-                    (int)off);
-               zero_user_segment(pages[i], off, end);
-               len -= (end - off);
-               i++;
-       }
-       while (len >= PAGE_CACHE_SIZE) {
-               dout("zeroing %d %p len=%d\n", i, pages[i], len);
-               zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
-               len -= PAGE_CACHE_SIZE;
-               i++;
-       }
-       /* trailing partial page? */
-       if (len) {
-               dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
-               zero_user_segment(pages[i], 0, len);
-       }
-}
-
-
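
The page-vector helpers deleted here were not dropped but moved into the shared libceph code, as the ceph_-prefixed call sites later in this file show. The trickiest of them is the range zeroing across page boundaries; a standalone sketch of the same arithmetic over fixed 4 KB chunks (CHUNK stands in for PAGE_CACHE_SIZE):

    #include <string.h>

    #define CHUNK 4096

    /* Same arithmetic as the removed zero_page_vector_range(): zero a
     * byte range that may begin and end partway into fixed-size chunks. */
    static void zero_range(unsigned char **pages, int off, int len)
    {
        int i = off / CHUNK;

        off %= CHUNK;
        if (off) {                          /* leading partial chunk */
            int end = off + len < CHUNK ? off + len : CHUNK;

            memset(pages[i] + off, 0, end - off);
            len -= end - off;
            i++;
        }
        while (len >= CHUNK) {              /* whole chunks */
            memset(pages[i], 0, CHUNK);
            len -= CHUNK;
            i++;
        }
        if (len)                            /* trailing partial chunk */
            memset(pages[i], 0, len);
    }
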
 /*
  * Read a range of bytes striped over one or more objects.  Iterate over
  * objects we stripe over.  (That's not atomic, but good enough for now.)
@@ -438,7 +282,7 @@ static int striped_read(struct inode *inode,
                        struct page **pages, int num_pages,
                        int *checkeof)
 {
-       struct ceph_client *client = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
@@ -459,7 +303,7 @@ static int striped_read(struct inode *inode,
 
 more:
        this_len = left;
-       ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
+       ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
@@ -477,8 +321,8 @@ more:
 
                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
-                       zero_page_vector_range(page_off + read,
-                                              pos - off - read, pages);
+                       ceph_zero_page_vector_range(page_off + read,
+                                                   pos - off - read, pages);
                }
                pos += ret;
                read = pos - off;
@@ -495,8 +339,8 @@ more:
                /* was original extent fully inside i_size? */
                if (pos + left <= inode->i_size) {
                        dout("zero tail\n");
-                       zero_page_vector_range(page_off + read, len - read,
-                                              pages);
+                       ceph_zero_page_vector_range(page_off + read, len - read,
+                                                   pages);
                        read = len;
                        goto out;
                }
@@ -531,7 +375,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 
        if (file->f_flags & O_DIRECT) {
-               pages = get_direct_page_vector(data, num_pages, off, len);
+               pages = ceph_get_direct_page_vector(data, num_pages, off, len);
 
                /*
                 * flush any page cache pages in this range.  this
@@ -552,13 +396,13 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
        ret = striped_read(inode, off, len, pages, num_pages, checkeof);
 
        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
-               ret = copy_page_vector_to_user(pages, data, off, ret);
+               ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;
 
 done:
        if (file->f_flags & O_DIRECT)
-               put_page_vector(pages, num_pages);
+               ceph_put_page_vector(pages, num_pages);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
@@ -594,7 +438,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_client *client = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
@@ -642,7 +486,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
         */
 more:
        len = left;
-       req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
+       req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode), pos, &len,
                                    CEPH_OSD_OP_WRITE, flags,
                                    ci->i_snap_realm->cached_context,
@@ -655,7 +499,7 @@ more:
        num_pages = calc_pages_for(pos, len);
 
        if (file->f_flags & O_DIRECT) {
-               pages = get_direct_page_vector(data, num_pages, pos, len);
+               pages = ceph_get_direct_page_vector(data, num_pages, pos, len);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
@@ -673,7 +517,7 @@ more:
                        ret = PTR_ERR(pages);
                        goto out;
                }
-               ret = copy_user_to_page_vector(pages, data, pos, len);
+               ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
@@ -689,7 +533,7 @@ more:
        req->r_num_pages = num_pages;
        req->r_inode = inode;
 
-       ret = ceph_osdc_start_request(&client->osdc, req, false);
+       ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                if (req->r_safe_callback) {
                        /*
@@ -697,15 +541,15 @@ more:
                         * start_request so that a tid has been assigned.
                         */
                        spin_lock(&ci->i_unsafe_lock);
-                       list_add(&ci->i_unsafe_writes, &req->r_unsafe_item);
+                       list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                }
-               ret = ceph_osdc_wait_request(&client->osdc, req);
+               ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
        }
 
        if (file->f_flags & O_DIRECT)
-               put_page_vector(pages, num_pages);
+               ceph_put_page_vector(pages, num_pages);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);
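
The list_add() hunk above is a genuine bug fix rather than a rename:
list_add(new, head) inserts @new after @head, so the old call threaded
the per-inode list head onto the request instead of the request onto
the list.  A minimal reminder of the API (struct and function names
here are hypothetical; the list head stands in for ci->i_unsafe_writes):

    #include <linux/list.h>

    static LIST_HEAD(unsafe_writes);          /* stand-in list head */

    struct write_req {
            struct list_head unsafe_item;     /* links req onto list */
    };

    static void track_unsafe(struct write_req *req)
    {
            /* new entry first, list head second */
            list_add(&req->unsafe_item, &unsafe_writes);
    }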
 
@@ -814,7 +658,8 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
+       struct ceph_osd_client *osdc =
+               &ceph_sb_to_client(inode->i_sb)->client->osdc;
        loff_t endoff = pos + iov->iov_len;
        int want, got = 0;
        int ret, err;
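
The recurring ->client indirection is the heart of this refactor: the
old monolithic ceph_client is split into a filesystem-side
ceph_fs_client that wraps a shared libceph ceph_client.  A field sketch
inferred from the accessors in this diff (not the full definition):

    struct ceph_fs_client {
            struct super_block        *sb;
            struct ceph_mount_options *mount_options;
            struct ceph_mds_client    *mdsc;   /* now allocated in
                                                * ceph_mdsc_init() */
            int                        mount_state;
            int                        min_caps;
            struct backing_dev_info    backing_dev_info;
            struct ceph_client        *client; /* shared state: ->osdc,
                                                * ->monc, ->msgr,
                                                * ->options */
            /* ... */
    };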
index e7cca414da03bcbd7549889a5ecb00d05ee11901..1d6a45b5a04c696591879d141165627746d6a476 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/module.h>
 #include <linux/fs.h>
@@ -13,7 +13,8 @@
 #include <linux/pagevec.h>
 
 #include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+#include <linux/ceph/decode.h>
 
 /*
  * Ceph inode operations
@@ -384,7 +385,7 @@ void ceph_destroy_inode(struct inode *inode)
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
-                       &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+                       ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;
 
                dout(" dropping residual ref to snap realm %p\n", realm);
@@ -685,7 +686,7 @@ static int fill_inode(struct inode *inode,
                }
 
                /* it may be better to set st_size in getattr instead? */
-               if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
+               if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
                        inode->i_size = ci->i_rbytes;
                break;
        default:
@@ -845,7 +846,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
  * the caller) if we fail.
  */
 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
-                                   bool *prehash)
+                                   bool *prehash, bool set_offset)
 {
        struct dentry *realdn;
 
@@ -877,7 +878,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
-       ceph_set_dentry_offset(dn);
+       if (set_offset)
+               ceph_set_dentry_offset(dn);
 out:
        return dn;
 }
@@ -900,7 +902,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
-       struct ceph_client *client = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;
 
@@ -964,7 +966,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
-                                              client->mount_args->snapdir_name,
+                                              fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
@@ -1062,7 +1064,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                                d_delete(dn);
                                goto done;
                        }
-                       dn = splice_dentry(dn, in, &have_lease);
+                       dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
@@ -1105,7 +1107,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
-               dn = splice_dentry(dn, in, NULL);
+               dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
@@ -1237,7 +1239,7 @@ retry_lookup:
                                err = PTR_ERR(in);
                                goto out;
                        }
-                       dn = splice_dentry(dn, in, NULL);
+                       dn = splice_dentry(dn, in, NULL, false);
                        if (IS_ERR(dn))
                                dn = NULL;
                }
@@ -1532,7 +1534,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        struct inode *parent_inode = dentry->d_parent->d_inode;
        const unsigned int ia_valid = attr->ia_valid;
        struct ceph_mds_request *req;
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
        int issued;
        int release = 0, dirtied = 0;
        int mask = 0;
@@ -1727,8 +1729,8 @@ out:
  */
 int ceph_do_getattr(struct inode *inode, int mask)
 {
-       struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;
 
index 76e307d2aba16868c9b1e8136496d929e0cd971f..8888c9ba68dbfec194e06f06142547ee2d35c8bc 100644 (file)
@@ -1,8 +1,10 @@
 #include <linux/in.h>
 
-#include "ioctl.h"
 #include "super.h"
-#include "ceph_debug.h"
+#include "mds_client.h"
+#include <linux/ceph/ceph_debug.h>
+
+#include "ioctl.h"
 
 
 /*
@@ -37,7 +39,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
        int err, i;
@@ -89,6 +91,68 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
        return err;
 }
 
+/*
+ * Set a layout policy on a directory inode. All items in the tree
+ * rooted at this inode will inherit this layout on creation
+ * (it doesn't apply retroactively), unless a subdirectory has
+ * its own layout policy.
+ */
+static long ceph_ioctl_set_layout_policy(struct file *file, void __user *arg)
+{
+       struct inode *inode = file->f_dentry->d_inode;
+       struct ceph_mds_request *req;
+       struct ceph_ioctl_layout l;
+       int err, i;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+
+       /* copy and validate */
+       if (copy_from_user(&l, arg, sizeof(l)))
+               return -EFAULT;
+
+       if ((l.object_size & ~PAGE_MASK) ||
+           (l.stripe_unit & ~PAGE_MASK) ||
+           !l.stripe_unit ||
+           (l.object_size &&
+               (unsigned)l.object_size % (unsigned)l.stripe_unit))
+               return -EINVAL;
+
+       /* make sure it's a valid data pool */
+       if (l.data_pool > 0) {
+               mutex_lock(&mdsc->mutex);
+               err = -EINVAL;
+               for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+                       if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
+                               err = 0;
+                               break;
+                       }
+               mutex_unlock(&mdsc->mutex);
+               if (err)
+                       return err;
+       }
+
+       req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
+                                      USE_AUTH_MDS);
+
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+       req->r_inode = igrab(inode);
+
+       req->r_args.setlayout.layout.fl_stripe_unit =
+                       cpu_to_le32(l.stripe_unit);
+       req->r_args.setlayout.layout.fl_stripe_count =
+                       cpu_to_le32(l.stripe_count);
+       req->r_args.setlayout.layout.fl_object_size =
+                       cpu_to_le32(l.object_size);
+       req->r_args.setlayout.layout.fl_pg_pool =
+                       cpu_to_le32(l.data_pool);
+       req->r_args.setlayout.layout.fl_pg_preferred =
+                       cpu_to_le32(l.preferred_osd);
+
+       err = ceph_mdsc_do_request(mdsc, inode, req);
+       ceph_mdsc_put_request(req);
+       return err;
+}
+
 /*
  * Return object name, size/offset information, and location (OSD
  * number, network address) for a given file offset.
@@ -98,7 +162,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
        struct ceph_ioctl_dataloc dl;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
+       struct ceph_osd_client *osdc =
+               &ceph_sb_to_client(inode->i_sb)->client->osdc;
        u64 len = 1, olen;
        u64 tmp;
        struct ceph_object_layout ol;
@@ -174,11 +239,15 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case CEPH_IOC_SET_LAYOUT:
                return ceph_ioctl_set_layout(file, (void __user *)arg);
 
+       case CEPH_IOC_SET_LAYOUT_POLICY:
+               return ceph_ioctl_set_layout_policy(file, (void __user *)arg);
+
        case CEPH_IOC_GET_DATALOC:
                return ceph_ioctl_get_dataloc(file, (void __user *)arg);
 
        case CEPH_IOC_LAZYIO:
                return ceph_ioctl_lazyio(file);
        }
+
        return -ENOTTY;
 }
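
For reference, a hypothetical userspace invocation of the new ioctl.
The field names follow the l.* accesses above, and the values respect
the validation block: stripe_unit and object_size page-aligned,
object_size a multiple of stripe_unit, and data_pool left at 0 to skip
the pool check.  preferred_osd = -1 is assumed to mean "no preference":

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "ioctl.h"                 /* CEPH_IOC_SET_LAYOUT_POLICY */

    int set_dir_layout_policy(const char *dir)
    {
            struct ceph_ioctl_layout l = {
                    .stripe_unit   = 65536,
                    .stripe_count  = 4,
                    .object_size   = 4 * 65536,
                    .data_pool     = 0,
                    .preferred_osd = -1,   /* assumed: no preference */
            };
            int fd = open(dir, O_RDONLY);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, CEPH_IOC_SET_LAYOUT_POLICY, &l) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }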
index 88451a3b6857d14bc7b21f418bcd82e56e9fdc64..a6ce54e94eb5ab435670093cd6ad789e72cc627b 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/ioctl.h>
 #include <linux/types.h>
 
-#define CEPH_IOCTL_MAGIC 0x97
+#define CEPH_IOCTL_MAGIC 0x98
 
 /* just use u64 to align sanely on all archs */
 struct ceph_ioctl_layout {
@@ -17,6 +17,8 @@ struct ceph_ioctl_layout {
                                   struct ceph_ioctl_layout)
 #define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2,          \
                                   struct ceph_ioctl_layout)
+#define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5,   \
+                                  struct ceph_ioctl_layout)
 
 /*
  * Extract identity, address of the OSD and object storing a given
index ff4e753aae929d37d414567d22fd6afef7316c7e..40abde93c345d054279fd51cbca998525c4931c7 100644 (file)
@@ -1,11 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/file.h>
 #include <linux/namei.h>
 
 #include "super.h"
 #include "mds_client.h"
-#include "pagelist.h"
+#include <linux/ceph/pagelist.h>
 
 /**
  * Implement fcntl and flock locking functions.
@@ -16,7 +16,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_mds_client *mdsc =
-               &ceph_sb_to_client(inode->i_sb)->mdsc;
+               ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        int err;
 
@@ -181,8 +181,9 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
  * Encode the flock and fcntl locks for the given inode into the pagelist.
  * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
  * sequential flock locks.
- * Must be called with BLK already held, and the lock numbers should have
- * been gathered under the same lock holding window.
+ * Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected,
+ * we return -ENOSPC.
  */
 int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                      int num_fcntl_locks, int num_flock_locks)
@@ -190,6 +191,8 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
        struct file_lock *lock;
        struct ceph_filelock cephlock;
        int err = 0;
+       int seen_fcntl = 0;
+       int seen_flock = 0;
 
        dout("encoding %d flock and %d fcntl locks", num_flock_locks,
             num_fcntl_locks);
@@ -198,6 +201,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                goto fail;
        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                if (lock->fl_flags & FL_POSIX) {
+                       ++seen_fcntl;
+                       if (seen_fcntl > num_fcntl_locks) {
+                               err = -ENOSPC;
+                               goto fail;
+                       }
                        err = lock_to_ceph_filelock(lock, &cephlock);
                        if (err)
                                goto fail;
@@ -213,6 +221,11 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
                goto fail;
        for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
                if (lock->fl_flags & FL_FLOCK) {
+                       ++seen_flock;
+                       if (seen_flock > num_flock_locks) {
+                               err = -ENOSPC;
+                               goto fail;
+                       }
                        err = lock_to_ceph_filelock(lock, &cephlock);
                        if (err)
                                goto fail;
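
These guards turn a silent buffer overrun into -ENOSPC: the caller
counts locks, drops lock_flocks() to allocate space, and retakes it to
encode, so any lock added in between makes the encode fail and forces a
recount.  The reconnect path in mds_client.c further below applies
exactly this retry with a pagelist cursor; condensed, the caller side
looks like:

    int nr_fcntl, nr_flock, err;

    do {
            lock_flocks();
            ceph_count_locks(inode, &nr_fcntl, &nr_flock);
            unlock_flocks();

            /* ... truncate and re-reserve pagelist space for
             * nr_fcntl + nr_flock entries ... */

            lock_flocks();
            err = ceph_encode_locks(inode, pagelist,
                                    nr_fcntl, nr_flock);
            unlock_flocks();
    } while (err == -ENOSPC);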
index f091b1351786368de18757d8cb262a19d1006bf1..3142b15940c25656a43ec3a5d72af3e1ee1cece9 100644 (file)
@@ -1,17 +1,21 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/fs.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <linux/smp_lock.h>
 
-#include "mds_client.h"
-#include "mon_client.h"
 #include "super.h"
-#include "messenger.h"
-#include "decode.h"
-#include "auth.h"
-#include "pagelist.h"
+#include "mds_client.h"
+
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/pagelist.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
 
 /*
  * A cluster of MDS (metadata server) daemons is responsible for
@@ -286,8 +290,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
                if (s->s_authorizer)
-                       s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
-                               s->s_mdsc->client->monc.auth, s->s_authorizer);
+                    s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
+                            s->s_mdsc->fsc->client->monc.auth,
+                            s->s_authorizer);
                kfree(s);
        }
 }
@@ -344,7 +349,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_seq = 0;
        mutex_init(&s->s_mutex);
 
-       ceph_con_init(mdsc->client->msgr, &s->s_con);
+       ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
        s->s_con.private = s;
        s->s_con.ops = &mds_con_ops;
        s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
@@ -599,7 +604,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        } else if (req->r_dentry) {
                struct inode *dir = req->r_dentry->d_parent->d_inode;
 
-               if (dir->i_sb != mdsc->client->sb) {
+               if (dir->i_sb != mdsc->fsc->sb) {
                        /* not this fs! */
                        inode = req->r_dentry->d_inode;
                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
@@ -884,7 +889,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
-                       &ceph_sb_to_client(inode->i_sb)->mdsc;
+                       ceph_sb_to_client(inode->i_sb)->mdsc;
 
                spin_lock(&mdsc->cap_dirty_lock);
                if (!list_empty(&ci->i_dirty_item)) {
@@ -1146,7 +1151,7 @@ int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
        struct ceph_msg *msg, *partial = NULL;
        struct ceph_mds_cap_release *head;
        int err = -ENOMEM;
-       int extra = mdsc->client->mount_args->cap_release_safety;
+       int extra = mdsc->fsc->mount_options->cap_release_safety;
        int num;
 
        dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
@@ -2085,7 +2090,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 
        /* insert trace into our cache */
        mutex_lock(&req->r_fill_mutex);
-       err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
+       err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && rinfo->dir_nr)
                        ceph_readdir_prepopulate(req, req->r_session);
@@ -2361,19 +2366,37 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
-
-               lock_kernel();
-               ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
-               rec.v2.flock_len = (2*sizeof(u32) +
-                                   (num_fcntl_locks+num_flock_locks) *
-                                   sizeof(struct ceph_filelock));
-
+               struct ceph_pagelist_cursor trunc_point;
+
+               ceph_pagelist_set_cursor(pagelist, &trunc_point);
+               do {
+                       lock_flocks();
+                       ceph_count_locks(inode, &num_fcntl_locks,
+                                        &num_flock_locks);
+                       rec.v2.flock_len = (2*sizeof(u32) +
+                                           (num_fcntl_locks+num_flock_locks) *
+                                           sizeof(struct ceph_filelock));
+                       unlock_flocks();
+
+                       /* pre-alloc pagelist */
+                       ceph_pagelist_truncate(pagelist, &trunc_point);
+                       err = ceph_pagelist_append(pagelist, &rec, reclen);
+                       if (!err)
+                               err = ceph_pagelist_reserve(pagelist,
+                                                           rec.v2.flock_len);
+
+                       /* encode locks */
+                       if (!err) {
+                               lock_flocks();
+                               err = ceph_encode_locks(inode,
+                                                       pagelist,
+                                                       num_fcntl_locks,
+                                                       num_flock_locks);
+                               unlock_flocks();
+                       }
+               } while (err == -ENOSPC);
+       } else {
                err = ceph_pagelist_append(pagelist, &rec, reclen);
-               if (!err)
-                       err = ceph_encode_locks(inode, pagelist,
-                                               num_fcntl_locks,
-                                               num_flock_locks);
-               unlock_kernel();
        }
 
 out_free:
@@ -2611,7 +2634,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session,
                         struct ceph_msg *msg)
 {
-       struct super_block *sb = mdsc->client->sb;
+       struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct dentry *parent, *dentry;
@@ -2889,10 +2912,16 @@ static void delayed_work(struct work_struct *work)
        schedule_delayed(mdsc);
 }
 
-int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
+int ceph_mdsc_init(struct ceph_fs_client *fsc)
 {
-       mdsc->client = client;
+       struct ceph_mds_client *mdsc;
+
+       mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
+       if (!mdsc)
+               return -ENOMEM;
+       mdsc->fsc = fsc;
+       fsc->mdsc = mdsc;
        mutex_init(&mdsc->mutex);
        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
        if (mdsc->mdsmap == NULL)
@@ -2925,7 +2954,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        INIT_LIST_HEAD(&mdsc->dentry_lru);
 
        ceph_caps_init(mdsc);
-       ceph_adjust_min_caps(mdsc, client->min_caps);
+       ceph_adjust_min_caps(mdsc, fsc->min_caps);
 
        return 0;
 }
@@ -2937,7 +2966,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
 static void wait_requests(struct ceph_mds_client *mdsc)
 {
        struct ceph_mds_request *req;
-       struct ceph_client *client = mdsc->client;
+       struct ceph_fs_client *fsc = mdsc->fsc;
 
        mutex_lock(&mdsc->mutex);
        if (__get_oldest_req(mdsc)) {
@@ -2945,7 +2974,7 @@ static void wait_requests(struct ceph_mds_client *mdsc)
 
                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
-                                   client->mount_args->mount_timeout * HZ);
+                                   fsc->client->options->mount_timeout * HZ);
 
                /* tear down remaining requests */
                mutex_lock(&mdsc->mutex);
@@ -3028,7 +3057,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
        u64 want_tid, want_flush;
 
-       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+       if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
                return;
 
        dout("sync\n");
@@ -3051,7 +3080,7 @@ bool done_closing_sessions(struct ceph_mds_client *mdsc)
 {
        int i, n = 0;
 
-       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+       if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
                return true;
 
        mutex_lock(&mdsc->mutex);
@@ -3069,8 +3098,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
        struct ceph_mds_session *session;
        int i;
-       struct ceph_client *client = mdsc->client;
-       unsigned long timeout = client->mount_args->mount_timeout * HZ;
+       struct ceph_fs_client *fsc = mdsc->fsc;
+       unsigned long timeout = fsc->client->options->mount_timeout * HZ;
 
        dout("close_sessions\n");
 
@@ -3117,7 +3146,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        dout("stopped\n");
 }
 
-void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
+static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 {
        dout("stop\n");
        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
@@ -3127,6 +3156,15 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
        ceph_caps_finalize(mdsc);
 }
 
+void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
+{
+       struct ceph_mds_client *mdsc = fsc->mdsc;
+
+       ceph_mdsc_stop(mdsc);
+       fsc->mdsc = NULL;
+       kfree(mdsc);
+}
+
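
ceph_mdsc_init() now allocates the ceph_mds_client itself and wires it
to the fs client, ceph_mdsc_stop() becomes internal, and
ceph_mdsc_destroy() is the new public teardown.  From the mount code's
point of view (a sketch with hypothetical function names; error
handling elided):

    static int example_mount(struct ceph_fs_client *fsc)
    {
            int err = ceph_mdsc_init(fsc);  /* allocates fsc->mdsc */

            if (err)
                    return err;
            /* ... mount continues; sessions open on demand ... */
            return 0;
    }

    static void example_umount(struct ceph_fs_client *fsc)
    {
            ceph_mdsc_close_sessions(fsc->mdsc);
            ceph_mdsc_destroy(fsc);   /* stops and frees fsc->mdsc */
    }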
 
 /*
  * handle mds map update.
@@ -3143,14 +3181,14 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 
        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
-       if (ceph_check_fsid(mdsc->client, &fsid) < 0)
+       if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
                return;
        epoch = ceph_decode_32(&p);
        maplen = ceph_decode_32(&p);
        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
 
        /* do we need it? */
-       ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
+       ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
        mutex_lock(&mdsc->mutex);
        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
                dout("handle_map epoch %u <= our %u\n",
@@ -3174,7 +3212,7 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
        } else {
                mdsc->mdsmap = newmap;  /* first mds map */
        }
-       mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
+       mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
 
        __wake_requests(mdsc, &mdsc->waiting_for_map);
 
@@ -3275,7 +3313,7 @@ static int get_authorizer(struct ceph_connection *con,
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
-       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+       struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
        int ret = 0;
 
        if (force_new && s->s_authorizer) {
@@ -3309,7 +3347,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
-       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+       struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
        return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
 }
@@ -3318,12 +3356,12 @@ static int invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
-       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+       struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
        if (ac->ops->invalidate_authorizer)
                ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
 
-       return ceph_monc_validate_auth(&mdsc->client->monc);
+       return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
 }
 
 static const struct ceph_connection_operations mds_con_ops = {
@@ -3336,7 +3374,4 @@ static const struct ceph_connection_operations mds_con_ops = {
        .peer_reset = peer_reset,
 };
 
-
-
-
 /* eof */
index c98267ce6d2ad97e1d9c86bc0660e2d82d39366c..d66d63c7235526ef63d16ff0ea1e9ba3899df586 100644 (file)
@@ -8,9 +8,9 @@
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 
-#include "types.h"
-#include "messenger.h"
-#include "mdsmap.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/mdsmap.h>
 
 /*
  * Some lock dependencies:
@@ -26,7 +26,7 @@
  *
  */
 
-struct ceph_client;
+struct ceph_fs_client;
 struct ceph_cap;
 
 /*
@@ -230,7 +230,7 @@ struct ceph_mds_request {
  * mds client state
  */
 struct ceph_mds_client {
-       struct ceph_client      *client;
+       struct ceph_fs_client  *fsc;
        struct mutex            mutex;         /* all nested structures */
 
        struct ceph_mdsmap      *mdsmap;
@@ -289,11 +289,6 @@ struct ceph_mds_client {
        int             caps_avail_count;    /* unused, unreserved */
        int             caps_min_count;      /* keep at least this many
                                                (unreserved) */
-
-#ifdef CONFIG_DEBUG_FS
-       struct dentry     *debugfs_file;
-#endif
-
        spinlock_t        dentry_lru_lock;
        struct list_head  dentry_lru;
        int               num_dentry;
@@ -316,10 +311,9 @@ extern void ceph_put_mds_session(struct ceph_mds_session *s);
 extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
                             struct ceph_msg *msg, int mds);
 
-extern int ceph_mdsc_init(struct ceph_mds_client *mdsc,
-                          struct ceph_client *client);
+extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
 extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
-extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc);
+extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
 
 extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
 
index 040be6d1150be5ace2be71e955ac3b3525b8fd76..73b7d44e8a354264e3f08f66e8cb788851328029 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/bug.h>
 #include <linux/err.h>
@@ -6,9 +6,9 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
-#include "mdsmap.h"
-#include "messenger.h"
-#include "decode.h"
+#include <linux/ceph/mdsmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
 
 #include "super.h"
 
@@ -117,7 +117,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                }
 
                dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
-                    i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr),
+                    i+1, n, global_id, mds, inc,
+                    ceph_pr_addr(&addr.in_addr),
                     ceph_mds_state_name(state));
                if (mds >= 0 && mds < m->m_max_mds && state > 0) {
                        m->m_info[mds].global_id = global_id;
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
deleted file mode 100644 (file)
index b6859f4..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-
-#include <linux/gfp.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-
-#include "pagelist.h"
-
-int ceph_pagelist_release(struct ceph_pagelist *pl)
-{
-       if (pl->mapped_tail)
-               kunmap(pl->mapped_tail);
-       while (!list_empty(&pl->head)) {
-               struct page *page = list_first_entry(&pl->head, struct page,
-                                                    lru);
-               list_del(&page->lru);
-               __free_page(page);
-       }
-       return 0;
-}
-
-static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
-{
-       struct page *page = __page_cache_alloc(GFP_NOFS);
-       if (!page)
-               return -ENOMEM;
-       pl->room += PAGE_SIZE;
-       list_add_tail(&page->lru, &pl->head);
-       if (pl->mapped_tail)
-               kunmap(pl->mapped_tail);
-       pl->mapped_tail = kmap(page);
-       return 0;
-}
-
-int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len)
-{
-       while (pl->room < len) {
-               size_t bit = pl->room;
-               int ret;
-
-               memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
-                      buf, bit);
-               pl->length += bit;
-               pl->room -= bit;
-               buf += bit;
-               len -= bit;
-               ret = ceph_pagelist_addpage(pl);
-               if (ret)
-                       return ret;
-       }
-
-       memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
-       pl->length += len;
-       pl->room -= len;
-       return 0;
-}
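
fs/ceph/pagelist.c disappears for the same reason as the page-vector
helpers: the pagelist now lives in libceph (note the new
<linux/ceph/pagelist.h> includes above).  Since append only kmaps the
tail page and tracks the remaining room, a typical encoder just appends
little-endian values; a minimal sketch, assuming the moved API keeps
the signatures shown above:

    /* hypothetical encoder: a count followed by a record */
    static int pack_rec(struct ceph_pagelist *pl, void *rec,
                        size_t reclen, u32 count)
    {
            __le32 wire = cpu_to_le32(count);
            int err;

            err = ceph_pagelist_append(pl, &wire, sizeof(wire));
            if (err)
                    return err;
            return ceph_pagelist_append(pl, rec, reclen);
    }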
index 4868b9dcac5a6cc7a4d00610780572f335ef68f2..39c243acd062c810d33e60da27af51cbcfe058e8 100644 (file)
@@ -1,10 +1,12 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/sort.h>
 #include <linux/slab.h>
 
 #include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
 
 /*
  * Snapshots in ceph are driven in large part by cooperation from the
@@ -119,6 +121,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
        INIT_LIST_HEAD(&realm->children);
        INIT_LIST_HEAD(&realm->child_item);
        INIT_LIST_HEAD(&realm->empty_item);
+       INIT_LIST_HEAD(&realm->dirty_item);
        INIT_LIST_HEAD(&realm->inodes_with_caps);
        spin_lock_init(&realm->inodes_with_caps_lock);
        __insert_snap_realm(&mdsc->snap_realms, realm);
@@ -467,7 +470,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                INIT_LIST_HEAD(&capsnap->ci_item);
                INIT_LIST_HEAD(&capsnap->flushing_item);
 
-               capsnap->follows = snapc->seq - 1;
+               capsnap->follows = snapc->seq;
                capsnap->issued = __ceph_caps_issued(ci, NULL);
                capsnap->dirty = dirty;
 
@@ -525,7 +528,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
 {
        struct inode *inode = &ci->vfs_inode;
-       struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 
        BUG_ON(capsnap->writing);
        capsnap->size = inode->i_size;
@@ -604,6 +607,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
        struct ceph_snap_realm *realm;
        int invalidate = 0;
        int err = -ENOMEM;
+       LIST_HEAD(dirty_realms);
 
        dout("update_snap_trace deletion=%d\n", deletion);
 more:
@@ -626,24 +630,6 @@ more:
                }
        }
 
-       if (le64_to_cpu(ri->seq) > realm->seq) {
-               dout("update_snap_trace updating %llx %p %lld -> %lld\n",
-                    realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
-               /*
-                * if the realm seq has changed, queue a cap_snap for every
-                * inode with open caps.  we do this _before_ we update
-                * the realm info so that we prepare for writeback under the
-                * _previous_ snap context.
-                *
-                * ...unless it's a snap deletion!
-                */
-               if (!deletion)
-                       queue_realm_cap_snaps(realm);
-       } else {
-               dout("update_snap_trace %llx %p seq %lld unchanged\n",
-                    realm->ino, realm, realm->seq);
-       }
-
        /* ensure the parent is correct */
        err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
        if (err < 0)
@@ -651,6 +637,8 @@ more:
        invalidate += err;
 
        if (le64_to_cpu(ri->seq) > realm->seq) {
+               dout("update_snap_trace updating %llx %p %lld -> %lld\n",
+                    realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
                /* update realm parameters, snap lists */
                realm->seq = le64_to_cpu(ri->seq);
                realm->created = le64_to_cpu(ri->created);
@@ -668,9 +656,17 @@ more:
                if (err < 0)
                        goto fail;
 
+               /* queue realm for cap_snap creation */
+               list_add(&realm->dirty_item, &dirty_realms);
+
                invalidate = 1;
        } else if (!realm->cached_context) {
+               dout("update_snap_trace %llx %p seq %lld new\n",
+                    realm->ino, realm, realm->seq);
                invalidate = 1;
+       } else {
+               dout("update_snap_trace %llx %p seq %lld unchanged\n",
+                    realm->ino, realm, realm->seq);
        }
 
        dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -683,6 +679,14 @@ more:
        if (invalidate)
                rebuild_snap_realms(realm);
 
+       /*
+        * queue cap snaps _after_ we've built the new snap contexts,
+        * so that i_head_snapc can be set appropriately.
+        */
+       list_for_each_entry(realm, &dirty_realms, dirty_item) {
+               queue_realm_cap_snaps(realm);
+       }
+
        __cleanup_empty_realms(mdsc);
        return 0;
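
The point of the dirty_realms list above is ordering: cap snaps are now
queued only after rebuild_snap_realms() has produced the new snap
contexts, so that i_head_snapc can be set appropriately (per the
comment above).  Condensed, the two phases are:

    LIST_HEAD(dirty_realms);

    /* phase 1, while decoding each realm update: */
    list_add(&realm->dirty_item, &dirty_realms);

    /* phase 2, after all contexts are rebuilt: */
    list_for_each_entry(realm, &dirty_realms, dirty_item)
            queue_realm_cap_snaps(realm);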
 
@@ -715,7 +719,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                igrab(inode);
                spin_unlock(&mdsc->snap_flush_lock);
                spin_lock(&inode->i_lock);
-               __ceph_flush_snaps(ci, &session);
+               __ceph_flush_snaps(ci, &session, 0);
                spin_unlock(&inode->i_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
@@ -745,7 +749,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                      struct ceph_mds_session *session,
                      struct ceph_msg *msg)
 {
-       struct super_block *sb = mdsc->client->sb;
+       struct super_block *sb = mdsc->fsc->sb;
        int mds = session->s_mds;
        u64 split;
        int op;
@@ -816,6 +820,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        };
                        struct inode *inode = ceph_find_inode(sb, vino);
                        struct ceph_inode_info *ci;
+                       struct ceph_snap_realm *oldrealm;
 
                        if (!inode)
                                continue;
@@ -841,18 +846,19 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        dout(" will move %p to split realm %llx %p\n",
                             inode, realm->ino, realm);
                        /*
-                        * Remove the inode from the realm's inode
-                        * list, but don't add it to the new realm
-                        * yet.  We don't want the cap_snap to be
-                        * queued (again) by ceph_update_snap_trace()
-                        * below.  Queue it _now_, under the old context.
+                        * Move the inode to the new realm
                         */
                        spin_lock(&realm->inodes_with_caps_lock);
                        list_del_init(&ci->i_snap_realm_item);
+                       list_add(&ci->i_snap_realm_item,
+                                &realm->inodes_with_caps);
+                       oldrealm = ci->i_snap_realm;
+                       ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
                        spin_unlock(&inode->i_lock);
 
-                       ceph_queue_cap_snap(ci);
+                       ceph_get_snap_realm(mdsc, realm);
+                       ceph_put_snap_realm(mdsc, oldrealm);
 
                        iput(inode);
                        continue;
@@ -880,43 +886,9 @@ skip_inode:
        ceph_update_snap_trace(mdsc, p, e,
                               op == CEPH_SNAP_OP_DESTROY);
 
-       if (op == CEPH_SNAP_OP_SPLIT) {
-               /*
-                * ok, _now_ add the inodes into the new realm.
-                */
-               for (i = 0; i < num_split_inos; i++) {
-                       struct ceph_vino vino = {
-                               .ino = le64_to_cpu(split_inos[i]),
-                               .snap = CEPH_NOSNAP,
-                       };
-                       struct inode *inode = ceph_find_inode(sb, vino);
-                       struct ceph_inode_info *ci;
-
-                       if (!inode)
-                               continue;
-                       ci = ceph_inode(inode);
-                       spin_lock(&inode->i_lock);
-                       if (list_empty(&ci->i_snap_realm_item)) {
-                               struct ceph_snap_realm *oldrealm =
-                                       ci->i_snap_realm;
-
-                               dout(" moving %p to split realm %llx %p\n",
-                                    inode, realm->ino, realm);
-                               spin_lock(&realm->inodes_with_caps_lock);
-                               list_add(&ci->i_snap_realm_item,
-                                        &realm->inodes_with_caps);
-                               ci->i_snap_realm = realm;
-                               spin_unlock(&realm->inodes_with_caps_lock);
-                               ceph_get_snap_realm(mdsc, realm);
-                               ceph_put_snap_realm(mdsc, oldrealm);
-                       }
-                       spin_unlock(&inode->i_lock);
-                       iput(inode);
-               }
-
+       if (op == CEPH_SNAP_OP_SPLIT)
                /* we took a reference when we created the realm, above */
                ceph_put_snap_realm(mdsc, realm);
-       }
 
        __cleanup_empty_realms(mdsc);
 
similarity index 59%
rename from fs/ceph/ceph_strings.c
rename to fs/ceph/strings.c
index c6179d3a26a216cb7b8f6ee0119884162887c778..cd5097d7c804e5897eeed60716c962d1bbd59b5c 100644 (file)
@@ -1,71 +1,9 @@
 /*
- * Ceph string constants
+ * Ceph fs string constants
  */
-#include "types.h"
+#include <linux/module.h>
+#include <linux/ceph/types.h>
 
-const char *ceph_entity_type_name(int type)
-{
-       switch (type) {
-       case CEPH_ENTITY_TYPE_MDS: return "mds";
-       case CEPH_ENTITY_TYPE_OSD: return "osd";
-       case CEPH_ENTITY_TYPE_MON: return "mon";
-       case CEPH_ENTITY_TYPE_CLIENT: return "client";
-       case CEPH_ENTITY_TYPE_AUTH: return "auth";
-       default: return "unknown";
-       }
-}
-
-const char *ceph_osd_op_name(int op)
-{
-       switch (op) {
-       case CEPH_OSD_OP_READ: return "read";
-       case CEPH_OSD_OP_STAT: return "stat";
-
-       case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
-
-       case CEPH_OSD_OP_WRITE: return "write";
-       case CEPH_OSD_OP_DELETE: return "delete";
-       case CEPH_OSD_OP_TRUNCATE: return "truncate";
-       case CEPH_OSD_OP_ZERO: return "zero";
-       case CEPH_OSD_OP_WRITEFULL: return "writefull";
-       case CEPH_OSD_OP_ROLLBACK: return "rollback";
-
-       case CEPH_OSD_OP_APPEND: return "append";
-       case CEPH_OSD_OP_STARTSYNC: return "startsync";
-       case CEPH_OSD_OP_SETTRUNC: return "settrunc";
-       case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
-
-       case CEPH_OSD_OP_TMAPUP: return "tmapup";
-       case CEPH_OSD_OP_TMAPGET: return "tmapget";
-       case CEPH_OSD_OP_TMAPPUT: return "tmapput";
-
-       case CEPH_OSD_OP_GETXATTR: return "getxattr";
-       case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
-       case CEPH_OSD_OP_SETXATTR: return "setxattr";
-       case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
-       case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
-       case CEPH_OSD_OP_RMXATTR: return "rmxattr";
-       case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
-
-       case CEPH_OSD_OP_PULL: return "pull";
-       case CEPH_OSD_OP_PUSH: return "push";
-       case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
-       case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
-       case CEPH_OSD_OP_SCRUB: return "scrub";
-
-       case CEPH_OSD_OP_WRLOCK: return "wrlock";
-       case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
-       case CEPH_OSD_OP_RDLOCK: return "rdlock";
-       case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
-       case CEPH_OSD_OP_UPLOCK: return "uplock";
-       case CEPH_OSD_OP_DNLOCK: return "dnlock";
-
-       case CEPH_OSD_OP_CALL: return "call";
-
-       case CEPH_OSD_OP_PGLS: return "pgls";
-       }
-       return "???";
-}
 
 const char *ceph_mds_state_name(int s)
 {
@@ -177,17 +115,3 @@ const char *ceph_snap_op_name(int o)
        }
        return "???";
 }
-
-const char *ceph_pool_op_name(int op)
-{
-       switch (op) {
-       case POOL_OP_CREATE: return "create";
-       case POOL_OP_DELETE: return "delete";
-       case POOL_OP_AUID_CHANGE: return "auid change";
-       case POOL_OP_CREATE_SNAP: return "create snap";
-       case POOL_OP_DELETE_SNAP: return "delete snap";
-       case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
-       case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
-       }
-       return "???";
-}
index 9922628532b2c649dee6761710405adc552ca248..d6e0e042189184183b4ccf82da8787622e7f7d11 100644 (file)
@@ -1,5 +1,5 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/backing-dev.h>
 #include <linux/ctype.h>
 #include <linux/statfs.h>
 #include <linux/string.h>
 
-#include "decode.h"
 #include "super.h"
-#include "mon_client.h"
-#include "auth.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
 
 /*
  * Ceph superblock operations
  * Handle the basics of mounting, unmounting.
  */
 
-
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
-       const char *e = s + len;
-
-       while (e != s && *(e-1) != '/')
-               e--;
-       return e;
-}
-
-
 /*
  * super ops
  */
 static void ceph_put_super(struct super_block *s)
 {
-       struct ceph_client *client = ceph_sb_to_client(s);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(s);
 
        dout("put_super\n");
-       ceph_mdsc_close_sessions(&client->mdsc);
+       ceph_mdsc_close_sessions(fsc->mdsc);
 
        /*
         * ensure we release the bdi before put_anon_super releases
         * the device name.
         */
-       if (s->s_bdi == &client->backing_dev_info) {
-               bdi_unregister(&client->backing_dev_info);
+       if (s->s_bdi == &fsc->backing_dev_info) {
+               bdi_unregister(&fsc->backing_dev_info);
                s->s_bdi = NULL;
        }
 
@@ -64,14 +53,14 @@ static void ceph_put_super(struct super_block *s)
 
 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
-       struct ceph_client *client = ceph_inode_to_client(dentry->d_inode);
-       struct ceph_monmap *monmap = client->monc.monmap;
+       struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
+       struct ceph_monmap *monmap = fsc->client->monc.monmap;
        struct ceph_statfs st;
        u64 fsid;
        int err;
 
        dout("statfs\n");
-       err = ceph_monc_do_statfs(&client->monc, &st);
+       err = ceph_monc_do_statfs(&fsc->client->monc, &st);
        if (err < 0)
                return err;
 
@@ -104,238 +93,28 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 
 static int ceph_sync_fs(struct super_block *sb, int wait)
 {
-       struct ceph_client *client = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 
        if (!wait) {
                dout("sync_fs (non-blocking)\n");
-               ceph_flush_dirty_caps(&client->mdsc);
+               ceph_flush_dirty_caps(fsc->mdsc);
                dout("sync_fs (non-blocking) done\n");
                return 0;
        }
 
        dout("sync_fs (blocking)\n");
-       ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc);
-       ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc);
+       ceph_osdc_sync(&fsc->client->osdc);
+       ceph_mdsc_sync(fsc->mdsc);
        dout("sync_fs (blocking) done\n");
        return 0;
 }
 
-static int default_congestion_kb(void)
-{
-       int congestion_kb;
-
-       /*
-        * Copied from NFS
-        *
-        * congestion size, scale with available memory.
-        *
-        *  64MB:    8192k
-        * 128MB:   11585k
-        * 256MB:   16384k
-        * 512MB:   23170k
-        *   1GB:   32768k
-        *   2GB:   46340k
-        *   4GB:   65536k
-        *   8GB:   92681k
-        *  16GB:  131072k
-        *
-        * This allows larger machines to have larger/more transfers.
-        * Limit the default to 256M
-        */
-       congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
-       if (congestion_kb > 256*1024)
-               congestion_kb = 256*1024;
-
-       return congestion_kb;
-}
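
A quick sanity check of the removed scaling formula, assuming 4 KiB
pages (PAGE_SHIFT = 12): 1 GiB of RAM gives totalram_pages = 262144,
int_sqrt(262144) = 512, and 16 * 512 = 8192 shifted left by
PAGE_SHIFT - 10 = 2 bits yields 32768k, matching the 1GB row of the
table; the 256M clamp only engages at roughly 64 GiB of RAM.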
-
-/**
- * ceph_show_options - Show mount options in /proc/mounts
- * @m: seq_file to write to
- * @mnt: mount descriptor
- */
-static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
-{
-       struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
-       struct ceph_mount_args *args = client->mount_args;
-
-       if (args->flags & CEPH_OPT_FSID)
-               seq_printf(m, ",fsid=%pU", &args->fsid);
-       if (args->flags & CEPH_OPT_NOSHARE)
-               seq_puts(m, ",noshare");
-       if (args->flags & CEPH_OPT_DIRSTAT)
-               seq_puts(m, ",dirstat");
-       if ((args->flags & CEPH_OPT_RBYTES) == 0)
-               seq_puts(m, ",norbytes");
-       if (args->flags & CEPH_OPT_NOCRC)
-               seq_puts(m, ",nocrc");
-       if (args->flags & CEPH_OPT_NOASYNCREADDIR)
-               seq_puts(m, ",noasyncreaddir");
-
-       if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
-               seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
-       if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
-               seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
-       if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
-               seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
-       if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
-               seq_printf(m, ",osdkeepalivetimeout=%d",
-                        args->osd_keepalive_timeout);
-       if (args->wsize)
-               seq_printf(m, ",wsize=%d", args->wsize);
-       if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
-               seq_printf(m, ",rsize=%d", args->rsize);
-       if (args->congestion_kb != default_congestion_kb())
-               seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
-       if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
-               seq_printf(m, ",caps_wanted_delay_min=%d",
-                        args->caps_wanted_delay_min);
-       if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
-               seq_printf(m, ",caps_wanted_delay_max=%d",
-                          args->caps_wanted_delay_max);
-       if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
-               seq_printf(m, ",cap_release_safety=%d",
-                          args->cap_release_safety);
-       if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
-               seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
-       if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
-               seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes);
-       if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
-               seq_printf(m, ",snapdirname=%s", args->snapdir_name);
-       if (args->name)
-               seq_printf(m, ",name=%s", args->name);
-       if (args->secret)
-               seq_puts(m, ",secret=<hidden>");
-       return 0;
-}
-
-/*
- * caches
- */
-struct kmem_cache *ceph_inode_cachep;
-struct kmem_cache *ceph_cap_cachep;
-struct kmem_cache *ceph_dentry_cachep;
-struct kmem_cache *ceph_file_cachep;
-
-static void ceph_inode_init_once(void *foo)
-{
-       struct ceph_inode_info *ci = foo;
-       inode_init_once(&ci->vfs_inode);
-}
-
-static int __init init_caches(void)
-{
-       ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
-                                     sizeof(struct ceph_inode_info),
-                                     __alignof__(struct ceph_inode_info),
-                                     (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
-                                     ceph_inode_init_once);
-       if (ceph_inode_cachep == NULL)
-               return -ENOMEM;
-
-       ceph_cap_cachep = KMEM_CACHE(ceph_cap,
-                                    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
-       if (ceph_cap_cachep == NULL)
-               goto bad_cap;
-
-       ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
-                                       SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
-       if (ceph_dentry_cachep == NULL)
-               goto bad_dentry;
-
-       ceph_file_cachep = KMEM_CACHE(ceph_file_info,
-                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
-       if (ceph_file_cachep == NULL)
-               goto bad_file;
-
-       return 0;
-
-bad_file:
-       kmem_cache_destroy(ceph_dentry_cachep);
-bad_dentry:
-       kmem_cache_destroy(ceph_cap_cachep);
-bad_cap:
-       kmem_cache_destroy(ceph_inode_cachep);
-       return -ENOMEM;
-}
-
-static void destroy_caches(void)
-{
-       kmem_cache_destroy(ceph_inode_cachep);
-       kmem_cache_destroy(ceph_cap_cachep);
-       kmem_cache_destroy(ceph_dentry_cachep);
-       kmem_cache_destroy(ceph_file_cachep);
-}
-
-
-/*
- * ceph_umount_begin - initiate forced umount.  Tear down the
- * mount, skipping steps that may hang while waiting for server(s).
- */
-static void ceph_umount_begin(struct super_block *sb)
-{
-       struct ceph_client *client = ceph_sb_to_client(sb);
-
-       dout("ceph_umount_begin - starting forced umount\n");
-       if (!client)
-               return;
-       client->mount_state = CEPH_MOUNT_SHUTDOWN;
-       return;
-}
-
-static const struct super_operations ceph_super_ops = {
-       .alloc_inode    = ceph_alloc_inode,
-       .destroy_inode  = ceph_destroy_inode,
-       .write_inode    = ceph_write_inode,
-       .sync_fs        = ceph_sync_fs,
-       .put_super      = ceph_put_super,
-       .show_options   = ceph_show_options,
-       .statfs         = ceph_statfs,
-       .umount_begin   = ceph_umount_begin,
-};
-
-
-const char *ceph_msg_type_name(int type)
-{
-       switch (type) {
-       case CEPH_MSG_SHUTDOWN: return "shutdown";
-       case CEPH_MSG_PING: return "ping";
-       case CEPH_MSG_AUTH: return "auth";
-       case CEPH_MSG_AUTH_REPLY: return "auth_reply";
-       case CEPH_MSG_MON_MAP: return "mon_map";
-       case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
-       case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
-       case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
-       case CEPH_MSG_STATFS: return "statfs";
-       case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
-       case CEPH_MSG_MDS_MAP: return "mds_map";
-       case CEPH_MSG_CLIENT_SESSION: return "client_session";
-       case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
-       case CEPH_MSG_CLIENT_REQUEST: return "client_request";
-       case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
-       case CEPH_MSG_CLIENT_REPLY: return "client_reply";
-       case CEPH_MSG_CLIENT_CAPS: return "client_caps";
-       case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
-       case CEPH_MSG_CLIENT_SNAP: return "client_snap";
-       case CEPH_MSG_CLIENT_LEASE: return "client_lease";
-       case CEPH_MSG_OSD_MAP: return "osd_map";
-       case CEPH_MSG_OSD_OP: return "osd_op";
-       case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
-       default: return "unknown";
-       }
-}
-
-
 /*
  * mount options
  */
 enum {
        Opt_wsize,
        Opt_rsize,
-       Opt_osdtimeout,
-       Opt_osdkeepalivetimeout,
-       Opt_mount_timeout,
-       Opt_osd_idle_ttl,
        Opt_caps_wanted_delay_min,
        Opt_caps_wanted_delay_max,
        Opt_cap_release_safety,
@@ -344,29 +123,19 @@ enum {
        Opt_congestion_kb,
        Opt_last_int,
        /* int args above */
-       Opt_fsid,
        Opt_snapdirname,
-       Opt_name,
-       Opt_secret,
        Opt_last_string,
        /* string args above */
-       Opt_ip,
-       Opt_noshare,
        Opt_dirstat,
        Opt_nodirstat,
        Opt_rbytes,
        Opt_norbytes,
-       Opt_nocrc,
        Opt_noasyncreaddir,
 };
 
-static match_table_t arg_tokens = {
+static match_table_t fsopt_tokens = {
        {Opt_wsize, "wsize=%d"},
        {Opt_rsize, "rsize=%d"},
-       {Opt_osdtimeout, "osdtimeout=%d"},
-       {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
-       {Opt_mount_timeout, "mount_timeout=%d"},
-       {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
        {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
        {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
        {Opt_cap_release_safety, "cap_release_safety=%d"},
@@ -374,403 +143,459 @@ static match_table_t arg_tokens = {
        {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
        {Opt_congestion_kb, "write_congestion_kb=%d"},
        /* int args above */
-       {Opt_fsid, "fsid=%s"},
        {Opt_snapdirname, "snapdirname=%s"},
-       {Opt_name, "name=%s"},
-       {Opt_secret, "secret=%s"},
        /* string args above */
-       {Opt_ip, "ip=%s"},
-       {Opt_noshare, "noshare"},
        {Opt_dirstat, "dirstat"},
        {Opt_nodirstat, "nodirstat"},
        {Opt_rbytes, "rbytes"},
        {Opt_norbytes, "norbytes"},
-       {Opt_nocrc, "nocrc"},
        {Opt_noasyncreaddir, "noasyncreaddir"},
        {-1, NULL}
 };
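
The enum above doubles as a crude type system: tokens declared before Opt_last_int take a "%d" argument, tokens between the two sentinels take "%s", and everything after Opt_last_string is a bare flag. A minimal sketch of how one option string is classified against fsopt_tokens using the <linux/parser.h> helpers; variable names here are illustrative and error handling is omitted (match_token() returns -1 for unknown options, which parse_fsopt_token() below turns into -EINVAL):

        substring_t argstr[MAX_OPT_ARGS];
        int token = match_token(opt_str, fsopt_tokens, argstr);

        if (token < Opt_last_int) {
                int val;
                match_int(&argstr[0], &val);  /* e.g. "wsize=65536" -> 65536 */
        } else if (token < Opt_last_string) {
                /* argstr[0] spans the %s part, e.g. ".snaps" */
        } else {
                /* bare flag such as "norbytes": nothing to parse */
        }
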
 
-static int parse_fsid(const char *str, struct ceph_fsid *fsid)
+static int parse_fsopt_token(char *c, void *private)
 {
-       int i = 0;
-       char tmp[3];
-       int err = -EINVAL;
-       int d;
-
-       dout("parse_fsid '%s'\n", str);
-       tmp[2] = 0;
-       while (*str && i < 16) {
-               if (ispunct(*str)) {
-                       str++;
-                       continue;
+       struct ceph_mount_options *fsopt = private;
+       substring_t argstr[MAX_OPT_ARGS];
+       int token, intval, ret;
+
+       token = match_token((char *)c, fsopt_tokens, argstr);
+       if (token < 0)
+               return -EINVAL;
+
+       if (token < Opt_last_int) {
+               ret = match_int(&argstr[0], &intval);
+               if (ret < 0) {
+                       pr_err("bad mount option arg (not int) "
+                              "at '%s'\n", c);
+                       return ret;
                }
-               if (!isxdigit(str[0]) || !isxdigit(str[1]))
-                       break;
-               tmp[0] = str[0];
-               tmp[1] = str[1];
-               if (sscanf(tmp, "%x", &d) < 1)
-                       break;
-               fsid->fsid[i] = d & 0xff;
-               i++;
-               str += 2;
+               dout("got int token %d val %d\n", token, intval);
+       } else if (token > Opt_last_int && token < Opt_last_string) {
+               dout("got string token %d val %s\n", token,
+                    argstr[0].from);
+       } else {
+               dout("got token %d\n", token);
        }
 
-       if (i == 16)
-               err = 0;
-       dout("parse_fsid ret %d got fsid %pU", err, fsid);
-       return err;
+       switch (token) {
+       case Opt_snapdirname:
+               kfree(fsopt->snapdir_name);
+               fsopt->snapdir_name = kstrndup(argstr[0].from,
+                                              argstr[0].to-argstr[0].from,
+                                              GFP_KERNEL);
+               if (!fsopt->snapdir_name)
+                       return -ENOMEM;
+               break;
+
+               /* misc */
+       case Opt_wsize:
+               fsopt->wsize = intval;
+               break;
+       case Opt_rsize:
+               fsopt->rsize = intval;
+               break;
+       case Opt_caps_wanted_delay_min:
+               fsopt->caps_wanted_delay_min = intval;
+               break;
+       case Opt_caps_wanted_delay_max:
+               fsopt->caps_wanted_delay_max = intval;
+               break;
+       case Opt_readdir_max_entries:
+               fsopt->max_readdir = intval;
+               break;
+       case Opt_readdir_max_bytes:
+               fsopt->max_readdir_bytes = intval;
+               break;
+       case Opt_congestion_kb:
+               fsopt->congestion_kb = intval;
+               break;
+       case Opt_dirstat:
+               fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
+               break;
+       case Opt_nodirstat:
+               fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
+               break;
+       case Opt_rbytes:
+               fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
+               break;
+       case Opt_norbytes:
+               fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
+               break;
+       case Opt_noasyncreaddir:
+               fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
+               break;
+       default:
+               BUG_ON(token);
+       }
+       return 0;
 }
 
-static struct ceph_mount_args *parse_mount_args(int flags, char *options,
-                                               const char *dev_name,
-                                               const char **path)
+static void destroy_mount_options(struct ceph_mount_options *args)
 {
-       struct ceph_mount_args *args;
-       const char *c;
-       int err = -ENOMEM;
-       substring_t argstr[MAX_OPT_ARGS];
+       dout("destroy_mount_options %p\n", args);
+       kfree(args->snapdir_name);
+       kfree(args);
+}
 
-       args = kzalloc(sizeof(*args), GFP_KERNEL);
-       if (!args)
-               return ERR_PTR(-ENOMEM);
-       args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr),
-                                GFP_KERNEL);
-       if (!args->mon_addr)
-               goto out;
+static int strcmp_null(const char *s1, const char *s2)
+{
+       if (!s1 && !s2)
+               return 0;
+       if (s1 && !s2)
+               return -1;
+       if (!s1 && s2)
+               return 1;
+       return strcmp(s1, s2);
+}
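
strcmp_null() extends strcmp() with a total order over possibly-NULL strings, so compare_mount_options() below can treat an unset snapdir_name like any other value. Sample results, following the definition above:

        strcmp_null(NULL, NULL);         /* 0: both unset, considered equal */
        strcmp_null(".snap", NULL);      /* -1: set sorts before unset */
        strcmp_null(".snap", ".snaps");  /* falls through to strcmp(), < 0 */
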
 
-       dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name);
-
-       /* start with defaults */
-       args->sb_flags = flags;
-       args->flags = CEPH_OPT_DEFAULT;
-       args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
-       args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
-       args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
-       args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;   /* seconds */
-       args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
-       args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
-       args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
-       args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
-       args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
-       args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
-       args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
-       args->congestion_kb = default_congestion_kb();
-
-       /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
-       err = -EINVAL;
-       if (!dev_name)
-               goto out;
-       *path = strstr(dev_name, ":/");
-       if (*path == NULL) {
-               pr_err("device name is missing path (no :/ in %s)\n",
-                      dev_name);
-               goto out;
-       }
+static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+                                struct ceph_options *new_opt,
+                                struct ceph_fs_client *fsc)
+{
+       struct ceph_mount_options *fsopt1 = new_fsopt;
+       struct ceph_mount_options *fsopt2 = fsc->mount_options;
+       int ofs = offsetof(struct ceph_mount_options, snapdir_name);
+       int ret;
 
-       /* get mon ip(s) */
-       err = ceph_parse_ips(dev_name, *path, args->mon_addr,
-                            CEPH_MAX_MON, &args->num_mon);
-       if (err < 0)
-               goto out;
+       ret = memcmp(fsopt1, fsopt2, ofs);
+       if (ret)
+               return ret;
+
+       ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
+       if (ret)
+               return ret;
+
+       return ceph_compare_options(new_opt, fsc->client);
+}
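
compare_mount_options() above leans on field order in struct ceph_mount_options: every member before snapdir_name is a flat integer, so a single memcmp() over that prefix compares them all, and only the trailing pointer needs a deep comparison. A self-contained illustration of the idiom (struct and field names invented for the example; the memcmp() is safe here because both structures come from kzalloc(), so padding bytes compare equal):

        #include <stddef.h>
        #include <string.h>

        struct opts {
                int a, b, c;    /* flat fields: covered by the memcmp() */
                char *name;     /* pointer: needs a deep comparison */
        };

        static int opts_equal(const struct opts *x, const struct opts *y)
        {
                if (memcmp(x, y, offsetof(struct opts, name)))
                        return 0;
                return strcmp_null(x->name, y->name) == 0;  /* helper above */
        }
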
+
+static int parse_mount_options(struct ceph_mount_options **pfsopt,
+                              struct ceph_options **popt,
+                              int flags, char *options,
+                              const char *dev_name,
+                              const char **path)
+{
+       struct ceph_mount_options *fsopt;
+       const char *dev_name_end;
+       int err = -ENOMEM;
+
+       fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
+       if (!fsopt)
+               return -ENOMEM;
+
+       dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
+
+       fsopt->sb_flags = flags;
+       fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
+
+       fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
+       fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+       if (!fsopt->snapdir_name)
+               goto out;       /* err is still -ENOMEM */
+       fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
+       fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
+       fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
+       fsopt->congestion_kb = default_congestion_kb();
+
+       /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
+       err = -EINVAL;
+       if (!dev_name)
+               goto out;
+       *path = strstr(dev_name, ":/");
+       if (*path == NULL) {
+               pr_err("device name is missing path (no :/ in %s)\n",
+                      dev_name);
+               goto out;
+       }
+       dev_name_end = *path;
+       dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
 
        /* path on server */
        *path += 2;
        dout("server path '%s'\n", *path);
 
-       /* parse mount options */
-       while ((c = strsep(&options, ",")) != NULL) {
-               int token, intval, ret;
-               if (!*c)
-                       continue;
-               err = -EINVAL;
-               token = match_token((char *)c, arg_tokens, argstr);
-               if (token < 0) {
-                       pr_err("bad mount option at '%s'\n", c);
-                       goto out;
-               }
-               if (token < Opt_last_int) {
-                       ret = match_int(&argstr[0], &intval);
-                       if (ret < 0) {
-                               pr_err("bad mount option arg (not int) "
-                                      "at '%s'\n", c);
-                               continue;
-                       }
-                       dout("got int token %d val %d\n", token, intval);
-               } else if (token > Opt_last_int && token < Opt_last_string) {
-                       dout("got string token %d val %s\n", token,
-                            argstr[0].from);
-               } else {
-                       dout("got token %d\n", token);
-               }
-               switch (token) {
-               case Opt_ip:
-                       err = ceph_parse_ips(argstr[0].from,
-                                            argstr[0].to,
-                                            &args->my_addr,
-                                            1, NULL);
-                       if (err < 0)
-                               goto out;
-                       args->flags |= CEPH_OPT_MYIP;
-                       break;
-
-               case Opt_fsid:
-                       err = parse_fsid(argstr[0].from, &args->fsid);
-                       if (err == 0)
-                               args->flags |= CEPH_OPT_FSID;
-                       break;
-               case Opt_snapdirname:
-                       kfree(args->snapdir_name);
-                       args->snapdir_name = kstrndup(argstr[0].from,
-                                             argstr[0].to-argstr[0].from,
-                                             GFP_KERNEL);
-                       break;
-               case Opt_name:
-                       args->name = kstrndup(argstr[0].from,
-                                             argstr[0].to-argstr[0].from,
-                                             GFP_KERNEL);
-                       break;
-               case Opt_secret:
-                       args->secret = kstrndup(argstr[0].from,
-                                               argstr[0].to-argstr[0].from,
-                                               GFP_KERNEL);
-                       break;
-
-                       /* misc */
-               case Opt_wsize:
-                       args->wsize = intval;
-                       break;
-               case Opt_rsize:
-                       args->rsize = intval;
-                       break;
-               case Opt_osdtimeout:
-                       args->osd_timeout = intval;
-                       break;
-               case Opt_osdkeepalivetimeout:
-                       args->osd_keepalive_timeout = intval;
-                       break;
-               case Opt_osd_idle_ttl:
-                       args->osd_idle_ttl = intval;
-                       break;
-               case Opt_mount_timeout:
-                       args->mount_timeout = intval;
-                       break;
-               case Opt_caps_wanted_delay_min:
-                       args->caps_wanted_delay_min = intval;
-                       break;
-               case Opt_caps_wanted_delay_max:
-                       args->caps_wanted_delay_max = intval;
-                       break;
-               case Opt_readdir_max_entries:
-                       args->max_readdir = intval;
-                       break;
-               case Opt_readdir_max_bytes:
-                       args->max_readdir_bytes = intval;
-                       break;
-               case Opt_congestion_kb:
-                       args->congestion_kb = intval;
-                       break;
-
-               case Opt_noshare:
-                       args->flags |= CEPH_OPT_NOSHARE;
-                       break;
-
-               case Opt_dirstat:
-                       args->flags |= CEPH_OPT_DIRSTAT;
-                       break;
-               case Opt_nodirstat:
-                       args->flags &= ~CEPH_OPT_DIRSTAT;
-                       break;
-               case Opt_rbytes:
-                       args->flags |= CEPH_OPT_RBYTES;
-                       break;
-               case Opt_norbytes:
-                       args->flags &= ~CEPH_OPT_RBYTES;
-                       break;
-               case Opt_nocrc:
-                       args->flags |= CEPH_OPT_NOCRC;
-                       break;
-               case Opt_noasyncreaddir:
-                       args->flags |= CEPH_OPT_NOASYNCREADDIR;
-                       break;
-
-               default:
-                       BUG_ON(token);
-               }
-       }
-       return args;
+       err = ceph_parse_options(popt, options, dev_name, dev_name_end,
+                                parse_fsopt_token, (void *)fsopt);
+       if (err)
+               goto out;
+
+       /* success */
+       *pfsopt = fsopt;
+       return 0;
 
 out:
-       kfree(args->mon_addr);
-       kfree(args);
-       return ERR_PTR(err);
+       destroy_mount_options(fsopt);
+       return err;
 }
 
-static void destroy_mount_args(struct ceph_mount_args *args)
+/**
+ * ceph_show_options - Show mount options in /proc/mounts
+ * @m: seq_file to write to
+ * @mnt: mount descriptor
+ */
+static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
 {
-       dout("destroy_mount_args %p\n", args);
-       kfree(args->snapdir_name);
-       args->snapdir_name = NULL;
-       kfree(args->name);
-       args->name = NULL;
-       kfree(args->secret);
-       args->secret = NULL;
-       kfree(args);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
+       struct ceph_mount_options *fsopt = fsc->mount_options;
+       struct ceph_options *opt = fsc->client->options;
+
+       if (opt->flags & CEPH_OPT_FSID)
+               seq_printf(m, ",fsid=%pU", &opt->fsid);
+       if (opt->flags & CEPH_OPT_NOSHARE)
+               seq_puts(m, ",noshare");
+       if (opt->flags & CEPH_OPT_NOCRC)
+               seq_puts(m, ",nocrc");
+
+       if (opt->name)
+               seq_printf(m, ",name=%s", opt->name);
+       if (opt->secret)
+               seq_puts(m, ",secret=<hidden>");
+
+       if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
+               seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
+       if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
+               seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
+       if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
+               seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
+       if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
+               seq_printf(m, ",osdkeepalivetimeout=%d",
+                          opt->osd_keepalive_timeout);
+
+       if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
+               seq_puts(m, ",dirstat");
+       if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
+               seq_puts(m, ",norbytes");
+       if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
+               seq_puts(m, ",noasyncreaddir");
+
+       if (fsopt->wsize)
+               seq_printf(m, ",wsize=%d", fsopt->wsize);
+       if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
+               seq_printf(m, ",rsize=%d", fsopt->rsize);
+       if (fsopt->congestion_kb != default_congestion_kb())
+               seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+       if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
+               seq_printf(m, ",caps_wanted_delay_min=%d",
+                        fsopt->caps_wanted_delay_min);
+       if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
+               seq_printf(m, ",caps_wanted_delay_max=%d",
+                          fsopt->caps_wanted_delay_max);
+       if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
+               seq_printf(m, ",cap_release_safety=%d",
+                          fsopt->cap_release_safety);
+       if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
+               seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
+       if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
+               seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
+       if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
+               seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
+       return 0;
 }
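
Taken together, a client mounted with a few non-default options would show up in /proc/mounts roughly like this (addresses and values are illustrative only):

        10.1.2.3:6789:/ /mnt/ceph ceph rw,name=admin,secret=<hidden>,rsize=131072,snapdirname=.snaps 0 0

Note that the libceph-owned options (fsid, name, secret, the mon/osd timeouts) are printed from opt, while the filesystem-level ones come from fsopt, mirroring the split this patch introduces.
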
 
 /*
- * create a fresh client instance
+ * handle any mon messages the standard library doesn't understand.
+ * return an error if we don't understand them either.
  */
-static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
+static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
 {
-       struct ceph_client *client;
+       struct ceph_fs_client *fsc = client->private;
+       int type = le16_to_cpu(msg->hdr.type);
+
+       switch (type) {
+       case CEPH_MSG_MDS_MAP:
+               ceph_mdsc_handle_map(fsc->mdsc, msg);
+               return 0;
+
+       default:
+               return -1;
+       }
+}
+
+/*
+ * create a new fs client
+ */
+struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
+                                       struct ceph_options *opt)
+{
+       struct ceph_fs_client *fsc;
        int err = -ENOMEM;
 
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (client == NULL)
+       fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
+       if (!fsc)
                return ERR_PTR(-ENOMEM);
 
-       mutex_init(&client->mount_mutex);
-
-       init_waitqueue_head(&client->auth_wq);
+       fsc->client = ceph_create_client(opt, fsc);
+       if (IS_ERR(fsc->client)) {
+               err = PTR_ERR(fsc->client);
+               goto fail;
+       }
+       fsc->client->extra_mon_dispatch = extra_mon_dispatch;
+       fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
+       fsc->client->monc.want_mdsmap = 1;
 
-       client->sb = NULL;
-       client->mount_state = CEPH_MOUNT_MOUNTING;
-       client->mount_args = args;
+       fsc->mount_options = fsopt;
 
-       client->msgr = NULL;
+       fsc->sb = NULL;
+       fsc->mount_state = CEPH_MOUNT_MOUNTING;
 
-       client->auth_err = 0;
-       atomic_long_set(&client->writeback_count, 0);
+       atomic_long_set(&fsc->writeback_count, 0);
 
-       err = bdi_init(&client->backing_dev_info);
+       err = bdi_init(&fsc->backing_dev_info);
        if (err < 0)
-               goto fail;
+               goto fail_client;
 
        err = -ENOMEM;
-       client->wb_wq = create_workqueue("ceph-writeback");
-       if (client->wb_wq == NULL)
+       fsc->wb_wq = create_workqueue("ceph-writeback");
+       if (fsc->wb_wq == NULL)
                goto fail_bdi;
-       client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
-       if (client->pg_inv_wq == NULL)
+       fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+       if (fsc->pg_inv_wq == NULL)
                goto fail_wb_wq;
-       client->trunc_wq = create_singlethread_workqueue("ceph-trunc");
-       if (client->trunc_wq == NULL)
+       fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+       if (fsc->trunc_wq == NULL)
                goto fail_pg_inv_wq;
 
        /* set up mempools */
        err = -ENOMEM;
-       client->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
-                             client->mount_args->wsize >> PAGE_CACHE_SHIFT);
-       if (!client->wb_pagevec_pool)
+       fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+                             fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
+       if (!fsc->wb_pagevec_pool)
                goto fail_trunc_wq;
 
        /* caps */
-       client->min_caps = args->max_readdir;
+       fsc->min_caps = fsopt->max_readdir;
+
+       return fsc;
 
-       /* subsystems */
-       err = ceph_monc_init(&client->monc, client);
-       if (err < 0)
-               goto fail_mempool;
-       err = ceph_osdc_init(&client->osdc, client);
-       if (err < 0)
-               goto fail_monc;
-       err = ceph_mdsc_init(&client->mdsc, client);
-       if (err < 0)
-               goto fail_osdc;
-       return client;
-
-fail_osdc:
-       ceph_osdc_stop(&client->osdc);
-fail_monc:
-       ceph_monc_stop(&client->monc);
-fail_mempool:
-       mempool_destroy(client->wb_pagevec_pool);
 fail_trunc_wq:
-       destroy_workqueue(client->trunc_wq);
+       destroy_workqueue(fsc->trunc_wq);
 fail_pg_inv_wq:
-       destroy_workqueue(client->pg_inv_wq);
+       destroy_workqueue(fsc->pg_inv_wq);
 fail_wb_wq:
-       destroy_workqueue(client->wb_wq);
+       destroy_workqueue(fsc->wb_wq);
 fail_bdi:
-       bdi_destroy(&client->backing_dev_info);
+       bdi_destroy(&fsc->backing_dev_info);
+fail_client:
+       ceph_destroy_client(fsc->client);
 fail:
-       kfree(client);
+       kfree(fsc);
        return ERR_PTR(err);
 }
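
create_fs_client() follows the usual kernel unwind idiom: each failure label releases exactly what was acquired before it, in reverse order, so a failure at any step leaks nothing. The shape of the pattern, reduced to two hypothetical resources:

        static int setup_example(void)
        {
                void *a, *b;

                a = acquire_a();        /* hypothetical helper */
                if (!a)
                        goto fail;
                b = acquire_b();        /* hypothetical helper */
                if (!b)
                        goto fail_a;
                use(a, b);
                return 0;

        fail_a:
                release_a(a);           /* undo in reverse order */
        fail:
                return -ENOMEM;
        }
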
 
-static void ceph_destroy_client(struct ceph_client *client)
+void destroy_fs_client(struct ceph_fs_client *fsc)
 {
-       dout("destroy_client %p\n", client);
+       dout("destroy_fs_client %p\n", fsc);
 
-       /* unmount */
-       ceph_mdsc_stop(&client->mdsc);
-       ceph_osdc_stop(&client->osdc);
+       destroy_workqueue(fsc->wb_wq);
+       destroy_workqueue(fsc->pg_inv_wq);
+       destroy_workqueue(fsc->trunc_wq);
 
-       /*
-        * make sure mds and osd connections close out before destroying
-        * the auth module, which is needed to free those connections'
-        * ceph_authorizers.
-        */
-       ceph_msgr_flush();
-
-       ceph_monc_stop(&client->monc);
+       bdi_destroy(&fsc->backing_dev_info);
 
-       ceph_debugfs_client_cleanup(client);
-       destroy_workqueue(client->wb_wq);
-       destroy_workqueue(client->pg_inv_wq);
-       destroy_workqueue(client->trunc_wq);
+       mempool_destroy(fsc->wb_pagevec_pool);
 
-       bdi_destroy(&client->backing_dev_info);
+       destroy_mount_options(fsc->mount_options);
 
-       if (client->msgr)
-               ceph_messenger_destroy(client->msgr);
-       mempool_destroy(client->wb_pagevec_pool);
+       ceph_fs_debugfs_cleanup(fsc);
 
-       destroy_mount_args(client->mount_args);
+       ceph_destroy_client(fsc->client);
 
-       kfree(client);
-       dout("destroy_client %p done\n", client);
+       kfree(fsc);
+       dout("destroy_fs_client %p done\n", fsc);
 }
 
 /*
- * Initially learn our fsid, or verify an fsid matches.
+ * caches
  */
-int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
+struct kmem_cache *ceph_inode_cachep;
+struct kmem_cache *ceph_cap_cachep;
+struct kmem_cache *ceph_dentry_cachep;
+struct kmem_cache *ceph_file_cachep;
+
+static void ceph_inode_init_once(void *foo)
 {
-       if (client->have_fsid) {
-               if (ceph_fsid_compare(&client->fsid, fsid)) {
-                       pr_err("bad fsid, had %pU got %pU",
-                              &client->fsid, fsid);
-                       return -1;
-               }
-       } else {
-               pr_info("client%lld fsid %pU\n", client->monc.auth->global_id,
-                       fsid);
-               memcpy(&client->fsid, fsid, sizeof(*fsid));
-               ceph_debugfs_client_init(client);
-               client->have_fsid = true;
-       }
+       struct ceph_inode_info *ci = foo;
+       inode_init_once(&ci->vfs_inode);
+}
+
+static int __init init_caches(void)
+{
+       ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
+                                     sizeof(struct ceph_inode_info),
+                                     __alignof__(struct ceph_inode_info),
+                                     (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
+                                     ceph_inode_init_once);
+       if (ceph_inode_cachep == NULL)
+               return -ENOMEM;
+
+       ceph_cap_cachep = KMEM_CACHE(ceph_cap,
+                                    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+       if (ceph_cap_cachep == NULL)
+               goto bad_cap;
+
+       ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
+                                       SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+       if (ceph_dentry_cachep == NULL)
+               goto bad_dentry;
+
+       ceph_file_cachep = KMEM_CACHE(ceph_file_info,
+                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+       if (ceph_file_cachep == NULL)
+               goto bad_file;
+
        return 0;
+
+bad_file:
+       kmem_cache_destroy(ceph_dentry_cachep);
+bad_dentry:
+       kmem_cache_destroy(ceph_cap_cachep);
+bad_cap:
+       kmem_cache_destroy(ceph_inode_cachep);
+       return -ENOMEM;
 }
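
Only the inode cache above open-codes kmem_cache_create(), because it passes the init-once constructor; the other three use the KMEM_CACHE() convenience macro, which (per <linux/slab.h>) expands to roughly:

        /* KMEM_CACHE(ceph_cap, flags) is approximately: */
        kmem_cache_create("ceph_cap", sizeof(struct ceph_cap),
                          __alignof__(struct ceph_cap), flags, NULL);
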
 
+static void destroy_caches(void)
+{
+       kmem_cache_destroy(ceph_inode_cachep);
+       kmem_cache_destroy(ceph_cap_cachep);
+       kmem_cache_destroy(ceph_dentry_cachep);
+       kmem_cache_destroy(ceph_file_cachep);
+}
+
+
 /*
- * true if we have the mon map (and have thus joined the cluster)
+ * ceph_umount_begin - initiate forced umount.  Tear down the
+ * mount, skipping steps that may hang while waiting for server(s).
  */
-static int have_mon_and_osd_map(struct ceph_client *client)
+static void ceph_umount_begin(struct super_block *sb)
 {
-       return client->monc.monmap && client->monc.monmap->epoch &&
-              client->osdc.osdmap && client->osdc.osdmap->epoch;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+
+       dout("ceph_umount_begin - starting forced umount\n");
+       if (!fsc)
+               return;
+       fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
+       return;
 }
 
+static const struct super_operations ceph_super_ops = {
+       .alloc_inode    = ceph_alloc_inode,
+       .destroy_inode  = ceph_destroy_inode,
+       .write_inode    = ceph_write_inode,
+       .sync_fs        = ceph_sync_fs,
+       .put_super      = ceph_put_super,
+       .show_options   = ceph_show_options,
+       .statfs         = ceph_statfs,
+       .umount_begin   = ceph_umount_begin,
+};
+
 /*
  * Bootstrap mount by opening the root directory.  Note the mount
  * @started time from caller, and time out if this takes too long.
  */
-static struct dentry *open_root_dentry(struct ceph_client *client,
+static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
                                       const char *path,
                                       unsigned long started)
 {
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;
@@ -784,14 +609,14 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
-       req->r_timeout = client->mount_args->mount_timeout * HZ;
+       req->r_timeout = fsc->client->options->mount_timeout * HZ;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err == 0) {
                dout("open_root_inode success\n");
                if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
-                   client->sb->s_root == NULL)
+                   fsc->sb->s_root == NULL)
                        root = d_alloc_root(req->r_target_inode);
                else
                        root = d_obtain_alias(req->r_target_inode);
@@ -804,105 +629,86 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
        return root;
 }
 
 /*
  * mount: join the ceph cluster, and open root directory.
  */
-static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
+static int ceph_mount(struct ceph_fs_client *fsc, struct vfsmount *mnt,
                      const char *path)
 {
-       struct ceph_entity_addr *myaddr = NULL;
        int err;
-       unsigned long timeout = client->mount_args->mount_timeout * HZ;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
+       int first = 0;   /* first vfsmount for this super_block */
 
        dout("mount start\n");
-       mutex_lock(&client->mount_mutex);
-
-       /* initialize the messenger */
-       if (client->msgr == NULL) {
-               if (ceph_test_opt(client, MYIP))
-                       myaddr = &client->mount_args->my_addr;
-               client->msgr = ceph_messenger_create(myaddr);
-               if (IS_ERR(client->msgr)) {
-                       err = PTR_ERR(client->msgr);
-                       client->msgr = NULL;
-                       goto out;
-               }
-               client->msgr->nocrc = ceph_test_opt(client, NOCRC);
-       }
+       mutex_lock(&fsc->client->mount_mutex);
 
-       /* open session, and wait for mon, mds, and osd maps */
-       err = ceph_monc_open_session(&client->monc);
+       err = __ceph_open_session(fsc->client, started);
        if (err < 0)
                goto out;
 
-       while (!have_mon_and_osd_map(client)) {
-               err = -EIO;
-               if (timeout && time_after_eq(jiffies, started + timeout))
-                       goto out;
-
-               /* wait */
-               dout("mount waiting for mon_map\n");
-               err = wait_event_interruptible_timeout(client->auth_wq,
-                      have_mon_and_osd_map(client) || (client->auth_err < 0),
-                      timeout);
-               if (err == -EINTR || err == -ERESTARTSYS)
-                       goto out;
-               if (client->auth_err < 0) {
-                       err = client->auth_err;
-                       goto out;
-               }
-       }
-
        dout("mount opening root\n");
-       root = open_root_dentry(client, "", started);
+       root = open_root_dentry(fsc, "", started);
        if (IS_ERR(root)) {
                err = PTR_ERR(root);
                goto out;
        }
-       if (client->sb->s_root)
+       if (fsc->sb->s_root) {
                dput(root);
-       else
-               client->sb->s_root = root;
+       } else {
+               fsc->sb->s_root = root;
+               first = 1;
+
+               err = ceph_fs_debugfs_init(fsc);
+               if (err < 0)
+                       goto fail;
+       }
 
        if (path[0] == 0) {
                dget(root);
        } else {
                dout("mount opening base mountpoint\n");
-               root = open_root_dentry(client, path, started);
+               root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
-                       dput(client->sb->s_root);
-                       client->sb->s_root = NULL;
-                       goto out;
+                       goto fail;
                }
        }
 
        mnt->mnt_root = root;
-       mnt->mnt_sb = client->sb;
+       mnt->mnt_sb = fsc->sb;
 
-       client->mount_state = CEPH_MOUNT_MOUNTED;
+       fsc->mount_state = CEPH_MOUNT_MOUNTED;
        dout("mount success\n");
        err = 0;
 
 out:
-       mutex_unlock(&client->mount_mutex);
+       mutex_unlock(&fsc->client->mount_mutex);
        return err;
+
+fail:
+       if (first) {
+               dput(fsc->sb->s_root);
+               fsc->sb->s_root = NULL;
+       }
+       goto out;
 }
 
 static int ceph_set_super(struct super_block *s, void *data)
 {
-       struct ceph_client *client = data;
+       struct ceph_fs_client *fsc = data;
        int ret;
 
        dout("set_super %p data %p\n", s, data);
 
-       s->s_flags = client->mount_args->sb_flags;
+       s->s_flags = fsc->mount_options->sb_flags;
        s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */
 
-       s->s_fs_info = client;
-       client->sb = s;
+       s->s_fs_info = fsc;
+       fsc->sb = s;
 
        s->s_op = &ceph_super_ops;
        s->s_export_op = &ceph_export_ops;
@@ -917,7 +723,7 @@ static int ceph_set_super(struct super_block *s, void *data)
 
 fail:
        s->s_fs_info = NULL;
-       client->sb = NULL;
+       fsc->sb = NULL;
        return ret;
 }
 
@@ -926,30 +732,23 @@ fail:
  */
 static int ceph_compare_super(struct super_block *sb, void *data)
 {
-       struct ceph_client *new = data;
-       struct ceph_mount_args *args = new->mount_args;
-       struct ceph_client *other = ceph_sb_to_client(sb);
-       int i;
+       struct ceph_fs_client *new = data;
+       struct ceph_mount_options *fsopt = new->mount_options;
+       struct ceph_options *opt = new->client->options;
+       struct ceph_fs_client *other = ceph_sb_to_client(sb);
 
        dout("ceph_compare_super %p\n", sb);
-       if (args->flags & CEPH_OPT_FSID) {
-               if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
-                       dout("fsid doesn't match\n");
-                       return 0;
-               }
-       } else {
-               /* do we share (a) monitor? */
-               for (i = 0; i < new->monc.monmap->num_mon; i++)
-                       if (ceph_monmap_contains(other->monc.monmap,
-                                        &new->monc.monmap->mon_inst[i].addr))
-                               break;
-               if (i == new->monc.monmap->num_mon) {
-                       dout("mon ip not part of monmap\n");
-                       return 0;
-               }
-               dout("mon ip matches existing sb %p\n", sb);
+
+       if (compare_mount_options(fsopt, opt, other)) {
+               dout("monitor(s)/mount options don't match\n");
+               return 0;
        }
-       if (args->sb_flags != other->mount_args->sb_flags) {
+       if ((opt->flags & CEPH_OPT_FSID) &&
+           ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+               dout("fsid doesn't match\n");
+               return 0;
+       }
+       if (fsopt->sb_flags != other->mount_options->sb_flags) {
                dout("flags differ\n");
                return 0;
        }
@@ -961,19 +760,20 @@ static int ceph_compare_super(struct super_block *sb, void *data)
  */
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 
-static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
+static int ceph_register_bdi(struct super_block *sb,
+                            struct ceph_fs_client *fsc)
 {
        int err;
 
        /* set ra_pages based on rsize mount option? */
-       if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
-               client->backing_dev_info.ra_pages =
-                       (client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
+       if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
+               fsc->backing_dev_info.ra_pages =
+                       (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
                        >> PAGE_SHIFT;
-       err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d",
+       err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
                           atomic_long_inc_return(&bdi_seq));
        if (!err)
-               sb->s_bdi = &client->backing_dev_info;
+               sb->s_bdi = &fsc->backing_dev_info;
        return err;
 }
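
The ra_pages assignment is a round-up conversion from the rsize byte count to pages. With the default rsize of 512 KB and 4 KB pages (PAGE_SHIFT == 12), for instance:

        /* (524288 + 4096 - 1) >> 12 = 128 pages of readahead */
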
 
@@ -982,46 +782,52 @@ static int ceph_get_sb(struct file_system_type *fs_type,
                       struct vfsmount *mnt)
 {
        struct super_block *sb;
-       struct ceph_client *client;
+       struct ceph_fs_client *fsc;
        int err;
        int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
        const char *path = NULL;
-       struct ceph_mount_args *args;
+       struct ceph_mount_options *fsopt = NULL;
+       struct ceph_options *opt = NULL;
 
        dout("ceph_get_sb\n");
-       args = parse_mount_args(flags, data, dev_name, &path);
-       if (IS_ERR(args)) {
-               err = PTR_ERR(args);
+       err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
+       if (err < 0)
                goto out_final;
-       }
 
        /* create client (which we may/may not use) */
-       client = ceph_create_client(args);
-       if (IS_ERR(client)) {
-               err = PTR_ERR(client);
+       fsc = create_fs_client(fsopt, opt);
+       if (IS_ERR(fsc)) {
+               err = PTR_ERR(fsc);
+               kfree(fsopt);
+               kfree(opt);
                goto out_final;
        }
 
-       if (client->mount_args->flags & CEPH_OPT_NOSHARE)
+       err = ceph_mdsc_init(fsc);
+       if (err < 0)
+               goto out;
+
+       if (ceph_test_opt(fsc->client, NOSHARE))
                compare_super = NULL;
-       sb = sget(fs_type, compare_super, ceph_set_super, client);
+       sb = sget(fs_type, compare_super, ceph_set_super, fsc);
        if (IS_ERR(sb)) {
                err = PTR_ERR(sb);
                goto out;
        }
 
-       if (ceph_sb_to_client(sb) != client) {
-               ceph_destroy_client(client);
-               client = ceph_sb_to_client(sb);
-               dout("get_sb got existing client %p\n", client);
+       if (ceph_sb_to_client(sb) != fsc) {
+               ceph_mdsc_destroy(fsc);
+               destroy_fs_client(fsc);
+               fsc = ceph_sb_to_client(sb);
+               dout("get_sb got existing client %p\n", fsc);
        } else {
-               dout("get_sb using new client %p\n", client);
-               err = ceph_register_bdi(sb, client);
+               dout("get_sb using new client %p\n", fsc);
+               err = ceph_register_bdi(sb, fsc);
                if (err < 0)
                        goto out_splat;
        }
 
-       err = ceph_mount(client, mnt, path);
+       err = ceph_mount(fsc, mnt, path);
        if (err < 0)
                goto out_splat;
        dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
@@ -1029,12 +835,13 @@ static int ceph_get_sb(struct file_system_type *fs_type,
        return 0;
 
 out_splat:
-       ceph_mdsc_close_sessions(&client->mdsc);
+       ceph_mdsc_close_sessions(fsc->mdsc);
        deactivate_locked_super(sb);
        goto out_final;
 
 out:
-       ceph_destroy_client(client);
+       ceph_mdsc_destroy(fsc);
+       destroy_fs_client(fsc);
 out_final:
        dout("ceph_get_sb fail %d\n", err);
        return err;
@@ -1042,11 +849,12 @@ out_final:
 
 static void ceph_kill_sb(struct super_block *s)
 {
-       struct ceph_client *client = ceph_sb_to_client(s);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(s);
        dout("kill_sb %p\n", s);
-       ceph_mdsc_pre_umount(&client->mdsc);
+       ceph_mdsc_pre_umount(fsc->mdsc);
        kill_anon_super(s);    /* will call put_super after sb is r/o */
-       ceph_destroy_client(client);
+       ceph_mdsc_destroy(fsc);
+       destroy_fs_client(fsc);
 }
 
 static struct file_system_type ceph_fs_type = {
@@ -1062,36 +870,20 @@ static struct file_system_type ceph_fs_type = {
 
 static int __init init_ceph(void)
 {
-       int ret = 0;
-
-       ret = ceph_debugfs_init();
-       if (ret < 0)
-               goto out;
-
-       ret = ceph_msgr_init();
-       if (ret < 0)
-               goto out_debugfs;
-
-       ret = init_caches();
+       int ret = init_caches();
        if (ret)
-               goto out_msgr;
+               goto out;
 
        ret = register_filesystem(&ceph_fs_type);
        if (ret)
                goto out_icache;
 
-       pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n",
-               CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL,
-               CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
-               CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+       pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
+
        return 0;
 
 out_icache:
        destroy_caches();
-out_msgr:
-       ceph_msgr_exit();
-out_debugfs:
-       ceph_debugfs_cleanup();
 out:
        return ret;
 }
@@ -1101,8 +893,6 @@ static void __exit exit_ceph(void)
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
        destroy_caches();
-       ceph_msgr_exit();
-       ceph_debugfs_cleanup();
 }
 
 module_init(init_ceph);
index c33897ae5725e82ca269606b78d54214d8abf7af..1886294e12f7a3106c00f3376d92e43330514081 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _FS_CEPH_SUPER_H
 #define _FS_CEPH_SUPER_H
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <asm/unaligned.h>
 #include <linux/backing-dev.h>
 #include <linux/writeback.h>
 #include <linux/slab.h>
 
-#include "types.h"
-#include "messenger.h"
-#include "msgpool.h"
-#include "mon_client.h"
-#include "mds_client.h"
-#include "osd_client.h"
-#include "ceph_fs.h"
+#include <linux/ceph/libceph.h>
 
 /* f_type in struct statfs */
 #define CEPH_SUPER_MAGIC 0x00c36400
 #define CEPH_BLOCK_SHIFT   20  /* 1 MB */
 #define CEPH_BLOCK         (1 << CEPH_BLOCK_SHIFT)
 
-/*
- * Supported features
- */
-#define CEPH_FEATURE_SUPPORTED CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK
-#define CEPH_FEATURE_REQUIRED  CEPH_FEATURE_NOSRCADDR
+#define CEPH_MOUNT_OPT_DIRSTAT         (1<<4) /* `cat dirname` for stats */
+#define CEPH_MOUNT_OPT_RBYTES          (1<<5) /* dir st_bytes = rbytes */
+#define CEPH_MOUNT_OPT_NOASYNCREADDIR  (1<<7) /* no dcache readdir */
 
-/*
- * mount options
- */
-#define CEPH_OPT_FSID             (1<<0)
-#define CEPH_OPT_NOSHARE          (1<<1) /* don't share client with other sbs */
-#define CEPH_OPT_MYIP             (1<<2) /* specified my ip */
-#define CEPH_OPT_DIRSTAT          (1<<4) /* funky `cat dirname` for stats */
-#define CEPH_OPT_RBYTES           (1<<5) /* dir st_bytes = rbytes */
-#define CEPH_OPT_NOCRC            (1<<6) /* no data crc on writes */
-#define CEPH_OPT_NOASYNCREADDIR   (1<<7) /* no dcache readdir */
+#define CEPH_MOUNT_OPT_DEFAULT    (CEPH_MOUNT_OPT_RBYTES)
 
-#define CEPH_OPT_DEFAULT   (CEPH_OPT_RBYTES)
+#define ceph_set_mount_opt(fsc, opt) \
+       (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
+#define ceph_test_mount_opt(fsc, opt) \
+       (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
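
The ## token pasting lets callers name options by their short suffix. Hypothetical call sites:

        ceph_set_mount_opt(fsc, NOASYNCREADDIR);
        if (ceph_test_mount_opt(fsc, RBYTES))
                use_recursive_byte_counts();    /* illustrative */

Note that ceph_set_mount_opt() carries a trailing semicolon inside the macro itself, so it is unsafe in an un-braced if/else arm; that wart is inherited from the old ceph_set_opt().
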
 
-#define ceph_set_opt(client, opt) \
-       (client)->mount_args->flags |= CEPH_OPT_##opt;
-#define ceph_test_opt(client, opt) \
-       (!!((client)->mount_args->flags & CEPH_OPT_##opt))
+#define CEPH_MAX_READDIR_DEFAULT        1024
+#define CEPH_MAX_READDIR_BYTES_DEFAULT  (512*1024)
+#define CEPH_SNAPDIRNAME_DEFAULT        ".snap"
 
-
-struct ceph_mount_args {
-       int sb_flags;
+struct ceph_mount_options {
        int flags;
-       struct ceph_fsid fsid;
-       struct ceph_entity_addr my_addr;
-       int num_mon;
-       struct ceph_entity_addr *mon_addr;
-       int mount_timeout;
-       int osd_idle_ttl;
-       int osd_timeout;
-       int osd_keepalive_timeout;
+       int sb_flags;
+
        int wsize;
        int rsize;            /* max readahead */
        int congestion_kb;    /* max writeback in flight */
@@ -73,82 +50,25 @@ struct ceph_mount_args {
        int cap_release_safety;
        int max_readdir;       /* max readdir result (entries) */
        int max_readdir_bytes; /* max readdir result (bytes) */
-       char *snapdir_name;   /* default ".snap" */
-       char *name;
-       char *secret;
-};
 
-/*
- * defaults
- */
-#define CEPH_MOUNT_TIMEOUT_DEFAULT  60
-#define CEPH_OSD_TIMEOUT_DEFAULT    60  /* seconds */
-#define CEPH_OSD_KEEPALIVE_DEFAULT  5
-#define CEPH_OSD_IDLE_TTL_DEFAULT    60
-#define CEPH_MOUNT_RSIZE_DEFAULT    (512*1024) /* readahead */
-#define CEPH_MAX_READDIR_DEFAULT    1024
-#define CEPH_MAX_READDIR_BYTES_DEFAULT    (512*1024)
-
-#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN  (16*1024*1024)
-
-#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
-#define CEPH_AUTH_NAME_DEFAULT   "guest"
-/*
- * Delay telling the MDS we no longer want caps, in case we reopen
- * the file.  Delay a minimum amount of time, even if we send a cap
- * message for some other reason.  Otherwise, take the opportunity to
- * update the mds to avoid sending another message later.
- */
-#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT      5  /* cap release delay */
-#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */
-
-#define CEPH_CAP_RELEASE_SAFETY_DEFAULT        (CEPH_CAPS_PER_RELEASE * 4)
-
-/* mount state */
-enum {
-       CEPH_MOUNT_MOUNTING,
-       CEPH_MOUNT_MOUNTED,
-       CEPH_MOUNT_UNMOUNTING,
-       CEPH_MOUNT_UNMOUNTED,
-       CEPH_MOUNT_SHUTDOWN,
-};
-
-/*
- * subtract jiffies
- */
-static inline unsigned long time_sub(unsigned long a, unsigned long b)
-{
-       BUG_ON(time_after(b, a));
-       return (long)a - (long)b;
-}
-
-/*
- * per-filesystem client state
- *
- * possibly shared by multiple mount points, if they are
- * mounting the same ceph filesystem/cluster.
- */
-struct ceph_client {
-       struct ceph_fsid fsid;
-       bool have_fsid;
+       /*
+        * everything above this point can be memcmp'd; everything below
+        * is handled in compare_mount_options()
+        */
 
-       struct mutex mount_mutex;       /* serialize mount attempts */
-       struct ceph_mount_args *mount_args;
+       char *snapdir_name;   /* default ".snap" */
+};
 
+struct ceph_fs_client {
        struct super_block *sb;
 
-       unsigned long mount_state;
-       wait_queue_head_t auth_wq;
-
-       int auth_err;
+       struct ceph_mount_options *mount_options;
+       struct ceph_client *client;
 
+       unsigned long mount_state;
        int min_caps;                  /* min caps I added */
 
-       struct ceph_messenger *msgr;   /* messenger instance */
-       struct ceph_mon_client monc;
-       struct ceph_mds_client mdsc;
-       struct ceph_osd_client osdc;
+       struct ceph_mds_client *mdsc;
 
        /* writeback */
        mempool_t *wb_pagevec_pool;
@@ -160,14 +80,14 @@ struct ceph_client {
        struct backing_dev_info backing_dev_info;
 
 #ifdef CONFIG_DEBUG_FS
-       struct dentry *debugfs_monmap;
-       struct dentry *debugfs_mdsmap, *debugfs_osdmap;
-       struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
+       struct dentry *debugfs_dentry_lru, *debugfs_caps;
        struct dentry *debugfs_congestion_kb;
        struct dentry *debugfs_bdi;
+       struct dentry *debugfs_mdsc, *debugfs_mdsmap;
 #endif
 };
 
 /*
  * File i/o capability.  This tracks shared state with the metadata
  * server that allows us to cache or writeback attributes or to read
@@ -275,6 +195,20 @@ struct ceph_inode_xattr {
        int should_free_val;
 };
 
+/*
+ * Ceph dentry state
+ */
+struct ceph_dentry_info {
+       struct ceph_mds_session *lease_session;
+       u32 lease_gen, lease_shared_gen;
+       u32 lease_seq;
+       unsigned long lease_renew_after, lease_renew_from;
+       struct list_head lru;
+       struct dentry *dentry;
+       u64 time;
+       u64 offset;
+};
+
 struct ceph_inode_xattrs_info {
        /*
         * (still encoded) xattr blob. we avoid the overhead of parsing
@@ -296,11 +230,6 @@ struct ceph_inode_xattrs_info {
 /*
  * Ceph inode.
  */
-#define CEPH_I_COMPLETE  1  /* we have complete directory cached */
-#define CEPH_I_NODELAY   4  /* do not delay cap release */
-#define CEPH_I_FLUSH     8  /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH  16  /* do not flush dirty caps */
-
 struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
@@ -391,6 +320,63 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
        return container_of(inode, struct ceph_inode_info, vfs_inode);
 }
 
+static inline struct ceph_vino ceph_vino(struct inode *inode)
+{
+       return ceph_inode(inode)->i_vino;
+}
+
+/*
+ * ino_t is <64 bits on many architectures, blech.
+ *
+ * don't include snap in ino hash, at least for now.
+ */
+static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
+{
+       ino_t ino = (ino_t)vino.ino;  /* ^ (vino.snap << 20); */
+#if BITS_PER_LONG == 32
+       ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
+       if (!ino)
+               ino = 1;
+#endif
+       return ino;
+}
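
On 32-bit hosts the shift amount works out to (8 - 4) * 8 = 32, so the fold is simply the low 32 bits XOR the high 32 bits, and a result of 0 is bumped to 1 because ino 0 is reserved. A worked example (values illustrative):

        /* vino.ino = 0x0000000200000007 on a 32-bit host:
         *   (ino_t)vino.ino  = 0x00000007
         *   vino.ino >> 32   = 0x00000002
         *   folded ino       = 0x00000005
         */
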
+
+/* for printf-style formatting */
+#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
+
+static inline u64 ceph_ino(struct inode *inode)
+{
+       return ceph_inode(inode)->i_vino.ino;
+}
+static inline u64 ceph_snap(struct inode *inode)
+{
+       return ceph_inode(inode)->i_vino.snap;
+}
+
+static inline int ceph_ino_compare(struct inode *inode, void *data)
+{
+       struct ceph_vino *pvino = (struct ceph_vino *)data;
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       return ci->i_vino.ino == pvino->ino &&
+               ci->i_vino.snap == pvino->snap;
+}
+
+static inline struct inode *ceph_find_inode(struct super_block *sb,
+                                           struct ceph_vino vino)
+{
+       ino_t t = ceph_vino_to_ino(vino);
+       return ilookup5(sb, t, ceph_ino_compare, &vino);
+}
+
+
+/*
+ * Ceph inode flags (i_ceph_flags).
+ */
+#define CEPH_I_COMPLETE  1  /* we have complete directory cached */
+#define CEPH_I_NODELAY   4  /* do not delay cap release */
+#define CEPH_I_FLUSH     8  /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH  16  /* do not flush dirty caps */
+
 static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -414,8 +400,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool r;
 
-       smp_mb();
+       spin_lock(&inode->i_lock);
        r = (ci->i_ceph_flags & mask) == mask;
+       spin_unlock(&inode->i_lock);
        return r;
 }
 
@@ -432,20 +419,6 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                            struct ceph_inode_frag *pfrag,
                            int *found);
 
-/*
- * Ceph dentry state
- */
-struct ceph_dentry_info {
-       struct ceph_mds_session *lease_session;
-       u32 lease_gen, lease_shared_gen;
-       u32 lease_seq;
-       unsigned long lease_renew_after, lease_renew_from;
-       struct list_head lru;
-       struct dentry *dentry;
-       u64 time;
-       u64 offset;
-};
-
 static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
 {
        return (struct ceph_dentry_info *)dentry->d_fsdata;
@@ -456,22 +429,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
        return ((loff_t)frag << 32) | (loff_t)off;
 }
 
-/*
- * ino_t is <64 bits on many architectures, blech.
- *
- * don't include snap in ino hash, at least for now.
- */
-static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
-{
-       ino_t ino = (ino_t)vino.ino;  /* ^ (vino.snap << 20); */
-#if BITS_PER_LONG == 32
-       ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
-       if (!ino)
-               ino = 1;
-#endif
-       return ino;
-}
-
 static inline int ceph_set_ino_cb(struct inode *inode, void *data)
 {
        ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
@@ -479,39 +436,6 @@ static inline int ceph_set_ino_cb(struct inode *inode, void *data)
        return 0;
 }
 
-static inline struct ceph_vino ceph_vino(struct inode *inode)
-{
-       return ceph_inode(inode)->i_vino;
-}
-
-/* for printf-style formatting */
-#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
-
-static inline u64 ceph_ino(struct inode *inode)
-{
-       return ceph_inode(inode)->i_vino.ino;
-}
-static inline u64 ceph_snap(struct inode *inode)
-{
-       return ceph_inode(inode)->i_vino.snap;
-}
-
-static inline int ceph_ino_compare(struct inode *inode, void *data)
-{
-       struct ceph_vino *pvino = (struct ceph_vino *)data;
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       return ci->i_vino.ino == pvino->ino &&
-               ci->i_vino.snap == pvino->snap;
-}
-
-static inline struct inode *ceph_find_inode(struct super_block *sb,
-                                           struct ceph_vino vino)
-{
-       ino_t t = ceph_vino_to_ino(vino);
-       return ilookup5(sb, t, ceph_ino_compare, &vino);
-}
-
-
 /*
  * caps helpers
  */
@@ -576,18 +500,18 @@ extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                             struct ceph_cap_reservation *ctx, int need);
 extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                               struct ceph_cap_reservation *ctx);
-extern void ceph_reservation_status(struct ceph_client *client,
+extern void ceph_reservation_status(struct ceph_fs_client *client,
                                    int *total, int *avail, int *used,
                                    int *reserved, int *min);
 
-static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
+static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
 {
-       return (struct ceph_client *)inode->i_sb->s_fs_info;
+       return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
 }
 
-static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
+static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
 {
-       return (struct ceph_client *)sb->s_fs_info;
+       return (struct ceph_fs_client *)sb->s_fs_info;
 }
 
 
@@ -616,51 +540,6 @@ struct ceph_file_info {
 
 
 
-/*
- * snapshots
- */
-
-/*
- * A "snap context" is the set of existing snapshots when we
- * write data.  It is used by the OSD to guide its COW behavior.
- *
- * The ceph_snap_context is refcounted, and attached to each dirty
- * page, indicating which context the dirty data belonged when it was
- * dirtied.
- */
-struct ceph_snap_context {
-       atomic_t nref;
-       u64 seq;
-       int num_snaps;
-       u64 snaps[];
-};
-
-static inline struct ceph_snap_context *
-ceph_get_snap_context(struct ceph_snap_context *sc)
-{
-       /*
-       printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
-              atomic_read(&sc->nref)+1);
-       */
-       if (sc)
-               atomic_inc(&sc->nref);
-       return sc;
-}
-
-static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
-{
-       if (!sc)
-               return;
-       /*
-       printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
-              atomic_read(&sc->nref)-1);
-       */
-       if (atomic_dec_and_test(&sc->nref)) {
-               /*printk(" deleting snap_context %p\n", sc);*/
-               kfree(sc);
-       }
-}
-
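
The helpers removed here are the classic refcount pair, get increments and put frees on the 1 -> 0 transition; judging from the <linux/ceph/...> include changes elsewhere in this diff they move with the rest of the shared Ceph code. A generic userspace sketch of the same lifetime rule, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdlib.h>

struct ref_obj {
        atomic_int nref;        /* starts at 1 for the creator */
};

static struct ref_obj *ref_get(struct ref_obj *o)
{
        if (o)
                atomic_fetch_add(&o->nref, 1);
        return o;
}

static void ref_put(struct ref_obj *o)
{
        /* atomic_fetch_sub returns the prior value: 1 means last ref */
        if (o && atomic_fetch_sub(&o->nref, 1) == 1)
                free(o);
}

int main(void)
{
        struct ref_obj *o = malloc(sizeof(*o));

        atomic_init(&o->nref, 1);       /* creator holds the first ref */
        ref_get(o);                     /* a second user takes a ref */
        ref_put(o);                     /* ... and drops it */
        ref_put(o);                     /* last put frees the object */
        return 0;
}
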
 /*
  * A "snap realm" describes a subset of the file hierarchy sharing
  * the same set of snapshots that apply to it.  The realms themselves
@@ -690,6 +569,8 @@ struct ceph_snap_realm {
 
        struct list_head empty_item;     /* if i have ref==0 */
 
+       struct list_head dirty_item;     /* if realm needs new context */
+
        /* the current set of snaps for this realm */
        struct ceph_snap_context *cached_context;
 
@@ -697,16 +578,33 @@ struct ceph_snap_realm {
        spinlock_t inodes_with_caps_lock;
 };
 
-
-
-/*
- * calculate the number of pages a given length and offset map onto,
- * if we align the data.
- */
-static inline int calc_pages_for(u64 off, u64 len)
+static inline int default_congestion_kb(void)
 {
-       return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-               (off >> PAGE_CACHE_SHIFT);
+       int congestion_kb;
+
+       /*
+        * Copied from NFS
+        *
+        * congestion size, scale with available memory.
+        *
+        *  64MB:    8192k
+        * 128MB:   11585k
+        * 256MB:   16384k
+        * 512MB:   23170k
+        *   1GB:   32768k
+        *   2GB:   46340k
+        *   4GB:   65536k
+        *   8GB:   92681k
+        *  16GB:  131072k
+        *
+        * This allows larger machines to have larger/more transfers.
+        * Limit the default to 256M
+        */
+       congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+       if (congestion_kb > 256*1024)
+               congestion_kb = 256*1024;
+
+       return congestion_kb;
 }
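
The table in the comment is just the formula evaluated: with 4 KiB pages, 1 GiB is 262144 pages, int_sqrt gives 512, and 16*512 shifted by PAGE_SHIFT-10 = 2 yields 32768k. A quick userspace check of the whole table (assumes 4 KiB pages; floor of sqrt() approximates int_sqrt, so some rows come out slightly under the listed values):

#include <math.h>
#include <stdio.h>

int main(void)
{
        unsigned long mb[] = { 64, 128, 256, 512, 1024, 2048,
                               4096, 8192, 16384 };

        for (unsigned i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
                unsigned long pages = mb[i] << (20 - 12); /* totalram_pages */
                unsigned long kb =
                        (16 * (unsigned long)sqrt((double)pages)) << (12 - 10);

                if (kb > 256 * 1024)
                        kb = 256 * 1024;        /* cap at 256M */
                printf("%6luMB: %7luk\n", mb[i], kb);
        }
        return 0;
}
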
 
 
@@ -739,16 +637,6 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
                           ci_item)->writing;
 }
 
-
-/* super.c */
-extern struct kmem_cache *ceph_inode_cachep;
-extern struct kmem_cache *ceph_cap_cachep;
-extern struct kmem_cache *ceph_dentry_cachep;
-extern struct kmem_cache *ceph_file_cachep;
-
-extern const char *ceph_msg_type_name(int type);
-extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-
 /* inode.c */
 extern const struct inode_operations ceph_file_iops;
 
@@ -826,7 +714,8 @@ extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
 extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                       struct ceph_snap_context *snapc);
 extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
-                              struct ceph_mds_session **psession);
+                              struct ceph_mds_session **psession,
+                              int again);
 extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
 extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
@@ -854,12 +743,18 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 /* file.c */
 extern const struct file_operations ceph_file_fops;
 extern const struct address_space_operations ceph_aops;
+extern int ceph_copy_to_page_vector(struct page **pages,
+                                   const char *data,
+                                   loff_t off, size_t len);
+extern int ceph_copy_from_page_vector(struct page **pages,
+                                   char *data,
+                                   loff_t off, size_t len);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
 extern int ceph_open(struct inode *inode, struct file *file);
 extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                                       struct nameidata *nd, int mode,
                                       int locked_dir);
 extern int ceph_release(struct inode *inode, struct file *filp);
-extern void ceph_release_page_vector(struct page **pages, int num_pages);
 
 /* dir.c */
 extern const struct file_operations ceph_dir_fops;
@@ -889,12 +784,6 @@ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 /* export.c */
 extern const struct export_operations ceph_export_ops;
 
-/* debugfs.c */
-extern int ceph_debugfs_init(void);
-extern void ceph_debugfs_cleanup(void);
-extern int ceph_debugfs_client_init(struct ceph_client *client);
-extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
-
 /* locks.c */
 extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
 extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
@@ -911,4 +800,8 @@ static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
        return NULL;
 }
 
+/* debugfs.c */
+extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
+extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
+
 #endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 9578af610b73fb48b69872ddeed222e58c8340f0..6e12a6ba5f79daabc1bc455a3a4db464b240c00a 100644 (file)
@@ -1,6 +1,9 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+
 #include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
 
 #include <linux/xattr.h>
 #include <linux/slab.h>
@@ -620,12 +623,12 @@ out:
 static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
                              const char *value, size_t size, int flags)
 {
-       struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        int err;
        int i, nr_pages;
        struct page **pages = NULL;
@@ -713,10 +716,9 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
 
        /* preallocate memory for xattr name, value, index node */
        err = -ENOMEM;
-       newname = kmalloc(name_len + 1, GFP_NOFS);
+       newname = kmemdup(name, name_len + 1, GFP_NOFS);
        if (!newname)
                goto out;
-       memcpy(newname, name, name_len + 1);
 
        if (val_len) {
                newval = kmalloc(val_len + 1, GFP_NOFS);
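
kmemdup() collapses the kmalloc()-then-memcpy() pair into one call with the same GFP flags and a length that already includes the terminating NUL. Its userspace equivalent is essentially:

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);    /* copy only on success */
        return p;
}
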
@@ -777,8 +779,8 @@ out:
 
 static int ceph_send_removexattr(struct dentry *dentry, const char *name)
 {
-       struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
-       struct ceph_mds_client *mdsc = &client->mdsc;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index f80a4f25123c3fa912daa1eb4080e69dab3d58fb..143d393881cbd86865063649dd9b27773a5dcf57 100644 (file)
@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = {
 #endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
-               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
+               /* no writeback happens */
+               BDI_CAP_NO_ACCT_AND_WRITEBACK),
 };
 
 static struct kobj_map *cdev_map;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0da1debd499d1845420753f50ef7b7da6765c237..917b7d449bb2a6248c28c23284cf82d9c032f285 100644 (file)
@@ -2,8 +2,6 @@ config CIFS
        tristate "CIFS support (advanced network filesystem, SMBFS successor)"
        depends on INET
        select NLS
-       select CRYPTO_MD5
-       select CRYPTO_ARC4
        help
          This is the client VFS module for the Common Internet File System
          (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 21f0fbd86989a6811dd6323668f0836079f7e621..cfd1ce34e0bc7b8c4794c81e1aa937e7f009ca75 100644 (file)
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                                if (compare_oid(oid, oidlen, MSKRB5_OID,
                                                MSKRB5_OID_LEN))
                                        server->sec_mskerberos = true;
-                               if (compare_oid(oid, oidlen, KRB5U2U_OID,
+                               else if (compare_oid(oid, oidlen, KRB5U2U_OID,
                                                     KRB5U2U_OID_LEN))
                                        server->sec_kerberosu2u = true;
-                               if (compare_oid(oid, oidlen, KRB5_OID,
+                               else if (compare_oid(oid, oidlen, KRB5_OID,
                                                     KRB5_OID_LEN))
                                        server->sec_kerberos = true;
-                               if (compare_oid(oid, oidlen, NTLMSSP_OID,
+                               else if (compare_oid(oid, oidlen, NTLMSSP_OID,
                                                     NTLMSSP_OID_LEN))
                                        server->sec_ntlmssp = true;
 
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 709f2296bdb4930b2eee0dcb7dd341f94ca42e9b..35042d8f733865c77b31ee8d10dd097d4f0a16d8 100644 (file)
@@ -27,7 +27,6 @@
 #include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
-#include "ntlmssp.h"
 #include <linux/ctype.h>
 #include <linux/random.h>
 
@@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                       unsigned char *p24);
 
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
-                       struct TCP_Server_Info *server, char *signature)
+                                   const struct mac_key *key, char *signature)
 {
-       int rc;
+       struct  MD5Context context;
 
-       if (cifs_pdu == NULL || server == NULL || signature == NULL)
+       if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
                return -EINVAL;
 
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1,
-                       "cifs_calculate_signature: can't generate signature\n");
-               return -1;
-       }
-
-       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
-       if (rc) {
-               cERROR(1, "cifs_calculate_signature: oould not init md5\n");
-               return rc;
-       }
-
-       if (server->secType == RawNTLMSSP)
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       server->session_key.data.ntlmv2.key,
-                       CIFS_NTLMV2_SESSKEY_SIZE);
-       else
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       (char *)&server->session_key.data,
-                       server->session_key.len);
-
-       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+       cifs_MD5_init(&context);
+       cifs_MD5_update(&context, (char *)&key->data, key->len);
+       cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
 
-       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
-
-       return rc;
+       cifs_MD5_final(signature, &context);
+       return 0;
 }
 
-
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
                  __u32 *pexpected_response_sequence_number)
 {
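
The restored signer computes MD5 over the MAC key followed by the whole SMB PDU, and the low 8 bytes of the digest become the wire signature. A hedged userspace sketch of that flow, using OpenSSL's legacy MD5_* context API as a stand-in for the kernel's cifs_MD5_* helpers:

#include <openssl/md5.h>
#include <string.h>

static void smb_sign(const unsigned char *key, size_t keylen,
                     const unsigned char *pdu, size_t pdulen,
                     unsigned char sig8[8])
{
        unsigned char digest[MD5_DIGEST_LENGTH];
        MD5_CTX ctx;

        MD5_Init(&ctx);
        MD5_Update(&ctx, key, keylen);  /* session/MAC key first */
        MD5_Update(&ctx, pdu, pdulen);  /* then the entire SMB PDU */
        MD5_Final(digest, &ctx);
        memcpy(sig8, digest, 8);        /* low 8 bytes go on the wire */
}
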
@@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
+       rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+                                     smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 }
 
 static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
-                       struct TCP_Server_Info *server, char *signature)
+                               const struct mac_key *key, char *signature)
 {
+       struct  MD5Context context;
        int i;
-       int rc;
 
-       if (iov == NULL || server == NULL || signature == NULL)
+       if ((iov == NULL) || (signature == NULL) || (key == NULL))
                return -EINVAL;
 
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1, "cifs_calc_signature2: can't generate signature\n");
-               return -1;
-       }
-
-       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
-       if (rc) {
-               cERROR(1, "cifs_calc_signature2: oould not init md5\n");
-               return rc;
-       }
-
-       if (server->secType == RawNTLMSSP)
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       server->session_key.data.ntlmv2.key,
-                       CIFS_NTLMV2_SESSKEY_SIZE);
-       else
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       (char *)&server->session_key.data,
-                       server->session_key.len);
-
+       cifs_MD5_init(&context);
+       cifs_MD5_update(&context, (char *)&key->data, key->len);
        for (i = 0; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
-                       cERROR(1, "cifs_calc_signature2: null iovec entry");
+                       cERROR(1, "null iovec entry");
                        return -EIO;
                }
                /* The first entry includes a length field (which does not get
@@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
                if (i == 0) {
                        if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
                                break; /* nothing to sign or corrupt header */
-                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                               iov[i].iov_base + 4, iov[i].iov_len - 4);
+                       cifs_MD5_update(&context, iov[0].iov_base+4,
+                                 iov[0].iov_len-4);
                } else
-                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                               iov[i].iov_base, iov[i].iov_len);
+                       cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
        }
 
-       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+       cifs_MD5_final(signature, &context);
 
-       return rc;
+       return 0;
 }
 
+
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
                   __u32 *pexpected_response_sequence_number)
 {
@@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
+       rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
+                                     smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 }
 
 int cifs_verify_signature(struct smb_hdr *cifs_pdu,
-                         struct TCP_Server_Info *server,
+                         const struct mac_key *mac_key,
                          __u32 expected_sequence_number)
 {
-       int rc;
+       unsigned int rc;
        char server_response_sig[8];
        char what_we_think_sig_should_be[20];
 
-       if (cifs_pdu == NULL || server == NULL)
+       if ((cifs_pdu == NULL) || (mac_key == NULL))
                return -EINVAL;
 
        if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
-       rc = cifs_calculate_signature(cifs_pdu, server,
+       rc = cifs_calculate_signature(cifs_pdu, mac_key,
                what_we_think_sig_should_be);
 
        if (rc)
@@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_session_key(struct session_key *key, const char *rn,
+int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
                           const char *password)
 {
        char temp_key[16];
@@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
 {
        int rc = 0;
        int len;
-       char nt_hash[CIFS_NTHASH_SIZE];
+       char nt_hash[16];
+       struct HMACMD5Context *pctxt;
        wchar_t *user;
        wchar_t *domain;
-       wchar_t *server;
 
-       if (!ses->server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
+       pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
+
+       if (pctxt == NULL)
+               return -ENOMEM;
 
        /* calculate md4 hash of password */
        E_md4hash(ses->password, nt_hash);
 
-       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
-                               CIFS_NTHASH_SIZE);
-
-       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
-               return rc;
-       }
+       /* convert Domainname to unicode and uppercase */
+       hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
 
        /* convert ses->userName to unicode and uppercase */
        len = strlen(ses->userName);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
-       if (user == NULL) {
-               cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
-               rc = -ENOMEM;
+       if (user == NULL)
                goto calc_exit_2;
-       }
        len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
        UniStrupr(user);
-
-       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                               (char *)user, 2 * len);
+       hmac_md5_update((char *)user, 2*len, pctxt);
 
        /* convert ses->domainName to unicode and uppercase */
        if (ses->domainName) {
                len = strlen(ses->domainName);
 
                domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (domain == NULL) {
-                       cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
-                       rc = -ENOMEM;
+               if (domain == NULL)
                        goto calc_exit_1;
-               }
                len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
                                        nls_cp);
                /* the following line was removed since it didn't work well
@@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
                   Maybe converting the domain name earlier makes sense */
                /* UniStrupr(domain); */
 
-               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       (char *)domain, 2 * len);
+               hmac_md5_update((char *)domain, 2*len, pctxt);
 
                kfree(domain);
-       } else if (ses->serverName) {
-               len = strlen(ses->serverName);
-
-               server = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (server == NULL) {
-                       cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
-                       rc = -ENOMEM;
-                       goto calc_exit_1;
-               }
-               len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
-                                       nls_cp);
-               /* the following line was removed since it didn't work well
-                  with lower cased domain name that passed as an option.
-                  Maybe converting the domain name earlier makes sense */
-               /* UniStrupr(domain); */
-
-               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       (char *)server, 2 * len);
-
-               kfree(server);
        }
-
-       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       ses->server->ntlmv2_hash);
-
 calc_exit_1:
        kfree(user);
 calc_exit_2:
        /* BB FIXME what about bytes 24 through 40 of the signing key?
           compare with the NTLM example */
+       hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
+       kfree(pctxt);
        return rc;
 }
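
For orientation: the NT hash is MD4 of the UTF-16LE password, the NTLMv2 hash is HMAC-MD5 keyed with that NT hash over the uppercased user name plus the domain (both UTF-16LE), and CalcNTLMv2_response() below then keys another HMAC-MD5 with the result over the server challenge and client blob. A sketch of the first two steps, using OpenSSL's legacy MD4()/HMAC() one-shots as stand-ins and assuming the inputs are already converted and uppercased, as the caller does here:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/md4.h>

static void ntlmv2_hash(const unsigned char *pw_utf16le, size_t pw_len,
                        const unsigned char *userdom_utf16le, size_t ud_len,
                        unsigned char out[16])
{
        unsigned char nt_hash[MD4_DIGEST_LENGTH];       /* 16 bytes */
        unsigned int outlen = 16;

        MD4(pw_utf16le, pw_len, nt_hash);               /* NT hash */
        HMAC(EVP_md5(), nt_hash, sizeof(nt_hash),       /* keyed HMAC-MD5 */
             userdom_utf16le, ud_len, out, &outlen);
}
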
 
-static int
-find_domain_name(struct cifsSesInfo *ses)
-{
-       int rc = 0;
-       unsigned int attrsize;
-       unsigned int type;
-       unsigned char *blobptr;
-       struct ntlmssp2_name *attrptr;
-
-       if (ses->server->tiblob) {
-               blobptr = ses->server->tiblob;
-               attrptr = (struct ntlmssp2_name *) blobptr;
-
-               while ((type = attrptr->type) != 0) {
-                       blobptr += 2; /* advance attr type */
-                       attrsize = attrptr->length;
-                       blobptr += 2; /* advance attr size */
-                       if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-                               if (!ses->domainName) {
-                                       ses->domainName =
-                                               kmalloc(attrptr->length + 1,
-                                                               GFP_KERNEL);
-                                       if (!ses->domainName)
-                                                       return -ENOMEM;
-                                       cifs_from_ucs2(ses->domainName,
-                                               (__le16 *)blobptr,
-                                               attrptr->length,
-                                               attrptr->length,
-                                               load_nls_default(), false);
-                               }
-                       }
-                       blobptr += attrsize; /* advance attr  value */
-                       attrptr = (struct ntlmssp2_name *) blobptr;
-               }
-       } else {
-               ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
-               ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
-               if (!ses->server->tiblob) {
-                       ses->server->tilen = 0;
-                       cERROR(1, "Challenge target info allocation failure");
-                       return -ENOMEM;
-               }
-               memset(ses->server->tiblob, 0x0, ses->server->tilen);
-               attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
-               attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
-       }
-
-       return rc;
-}
-
-static int
-CalcNTLMv2_response(const struct TCP_Server_Info *server,
-                        char *v2_session_response)
-{
-       int rc;
-
-       if (!server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
-
-       crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
-               CIFS_HMAC_MD5_HASH_SIZE);
-
-       rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
-               return rc;
-       }
-
-       memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
-               server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
-       crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
-               v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
-               sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
-
-       if (server->tilen)
-               crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
-                                       server->tiblob, server->tilen);
-
-       rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
-                                       v2_session_response);
-
-       return rc;
-}
-
-int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
                      const struct nls_table *nls_cp)
 {
-       int rc = 0;
+       int rc;
        struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
+       struct HMACMD5Context context;
 
        buf->blob_signature = cpu_to_le32(0x00000101);
        buf->reserved = 0;
        buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
        get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
        buf->reserved2 = 0;
-
-       if (!ses->domainName) {
-               rc = find_domain_name(ses);
-               if (rc) {
-                       cERROR(1, "could not get domain/server name rc %d", rc);
-                       return rc;
-               }
-       }
+       buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+       buf->names[0].length = 0;
+       buf->names[1].type = 0;
+       buf->names[1].length = 0;
 
        /* calculate buf->ntlmv2_hash */
        rc = calc_ntlmv2_hash(ses, nls_cp);
-       if (rc) {
-               cERROR(1, "could not get v2 hash rc %d", rc);
-               return rc;
-       }
-       rc = CalcNTLMv2_response(ses->server, resp_buf);
-       if (rc) {
+       if (rc)
                cERROR(1, "could not get v2 hash rc %d", rc);
-               return rc;
-       }
-
-       if (!ses->server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
-
-       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
-                       ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+       CalcNTLMv2_response(ses, resp_buf);
 
-       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
-               return rc;
-       }
+       /* now calculate the MAC key for NTLMv2 */
+       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+       hmac_md5_update(resp_buf, 16, &context);
+       hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
 
-       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                               resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
-
-       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
-               ses->server->session_key.data.ntlmv2.key);
-
-       memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
-                       sizeof(struct ntlmv2_resp));
-       ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
-
-       return rc;
+       memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+              sizeof(struct ntlmv2_resp));
+       ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
 }
 
-int
-calc_seckey(struct TCP_Server_Info *server)
-{
-       int rc;
-       unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
-       struct crypto_blkcipher *tfm_arc4;
-       struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
-
-       get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
-
-       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
-                                               0, CRYPTO_ALG_ASYNC);
-       if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
-               cERROR(1, "could not allocate " "master crypto API arc4\n");
-               return 1;
-       }
-
-       desc.tfm = tfm_arc4;
-
-       crypto_blkcipher_setkey(tfm_arc4,
-               server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
-       sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
-       sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
-
-       if (!rc)
-               memcpy(server->session_key.data.ntlmv2.key,
-                               sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
-
-       crypto_free_blkcipher(tfm_arc4);
-
-       return 0;
-}
-
-void
-cifs_crypto_shash_release(struct TCP_Server_Info *server)
-{
-       if (server->ntlmssp.md5)
-               crypto_free_shash(server->ntlmssp.md5);
-
-       if (server->ntlmssp.hmacmd5)
-               crypto_free_shash(server->ntlmssp.hmacmd5);
-
-       kfree(server->ntlmssp.sdeschmacmd5);
-
-       kfree(server->ntlmssp.sdescmd5);
-}
-
-int
-cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
+void CalcNTLMv2_response(const struct cifsSesInfo *ses,
+                        char *v2_session_response)
 {
-       int rc;
-       unsigned int size;
-
-       server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-       if (!server->ntlmssp.hmacmd5 ||
-                       IS_ERR(server->ntlmssp.hmacmd5)) {
-               cERROR(1, "could not allocate crypto hmacmd5\n");
-               return 1;
-       }
-
-       server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
-       if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
-               cERROR(1, "could not allocate crypto md5\n");
-               rc = 1;
-               goto cifs_crypto_shash_allocate_ret1;
-       }
-
-       size = sizeof(struct shash_desc) +
-                       crypto_shash_descsize(server->ntlmssp.hmacmd5);
-       server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
-       if (!server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
-               rc = -ENOMEM;
-               goto cifs_crypto_shash_allocate_ret2;
-       }
-       server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
-       server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
+       struct HMACMD5Context context;
+       /* rest of v2 struct already generated */
+       memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
+       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
 
+       hmac_md5_update(v2_session_response+8,
+                       sizeof(struct ntlmv2_resp) - 8, &context);
 
-       size = sizeof(struct shash_desc) +
-                       crypto_shash_descsize(server->ntlmssp.md5);
-       server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
-               rc = -ENOMEM;
-               goto cifs_crypto_shash_allocate_ret3;
-       }
-       server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
-       server->ntlmssp.sdescmd5->shash.flags = 0x0;
-
-       return 0;
-
-cifs_crypto_shash_allocate_ret3:
-       kfree(server->ntlmssp.sdeschmacmd5);
-
-cifs_crypto_shash_allocate_ret2:
-       crypto_free_shash(server->ntlmssp.md5);
-
-cifs_crypto_shash_allocate_ret1:
-       crypto_free_shash(server->ntlmssp.hmacmd5);
-
-       return rc;
+       hmac_md5_final(v2_session_response, &context);
+/*     cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
 }
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c9d0cfc086ebcb609b504afdec6431e89ac75db9..0cdfb8c32ac68c34a98f5cdf412250c0eacdec2b 100644 (file)
@@ -25,9 +25,6 @@
 #include <linux/workqueue.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
-#include <crypto/internal/hash.h>
-#include <linux/scatterlist.h>
-
 /*
  * The sizes of various internal tables and strings
  */
@@ -100,7 +97,7 @@ enum protocolEnum {
        /* Netbios frames protocol not supported at this time */
 };
 
-struct session_key {
+struct mac_key {
        unsigned int len;
        union {
                char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -123,21 +120,6 @@ struct cifs_cred {
        struct cifs_ace *aces;
 };
 
-struct sdesc {
-       struct shash_desc shash;
-       char ctx[];
-};
-
-struct ntlmssp_auth {
-       __u32 client_flags;
-       __u32 server_flags;
-       unsigned char ciphertext[CIFS_CPHTXT_SIZE];
-       struct crypto_shash *hmacmd5;
-       struct crypto_shash *md5;
-       struct sdesc *sdeschmacmd5;
-       struct sdesc *sdescmd5;
-};
-
 /*
  *****************************************************************
  * Except the CIFS PDUs themselves all the
@@ -200,14 +182,11 @@ struct TCP_Server_Info {
        /* 16th byte of RFC1001 workstation name is always null */
        char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
        __u32 sequence_number; /* needed for CIFS PDU signature */
-       struct session_key session_key;
+       struct mac_key mac_signing_key;
        char ntlmv2_hash[16];
        unsigned long lstrp; /* when we got last response from this server */
        u16 dialect; /* dialect index that server chose */
        /* extended security flavors that server supports */
-       unsigned int tilen; /* length of the target info blob */
-       unsigned char *tiblob; /* target info blob in challenge response */
-       struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
        bool    sec_kerberos;           /* supports plain Kerberos */
        bool    sec_mskerberos;         /* supports legacy MS Kerberos */
        bool    sec_kerberosu2u;        /* supports U2U Kerberos */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 320e0fd0ba7b5f988b559e060173dc61c2aa064c..14d036d8db111f2719f9e50576e94024a105adfc 100644 (file)
  * Size of the session key (crypto key encrypted with the password
  */
 #define CIFS_SESS_KEY_SIZE (24)
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
-#define CIFS_SERVER_CHALLENGE_SIZE (8)
-#define CIFS_HMAC_MD5_HASH_SIZE (16)
-#define CIFS_CPHTXT_SIZE (16)
-#define CIFS_NTLMV2_SESSKEY_SIZE (16)
-#define CIFS_NTHASH_SIZE (16)
 
 /*
  * Maximum user name length
@@ -669,6 +663,7 @@ struct ntlmv2_resp {
        __le64  time;
        __u64  client_chal; /* random */
        __u32  reserved2;
+       struct ntlmssp2_name names[2];
        /* array of name entries could follow ending in minimum 4 byte struct */
 } __attribute__((packed));
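
The added names[2] restores a minimal target-info list at the tail of the response blob: entry 0 is an empty domain AV pair and entry 1, all zero, terminates the list (see the initialisation in setup_ntlmv2_rsp() above). Assuming the usual on-wire AV-pair layout of a 16-bit type and 16-bit length followed by the value bytes, each entry looks like:

#include <stdint.h>

/* hedged sketch: both fields are little-endian on the wire, and a
 * type of 0 with length 0 marks the end of the list */
struct av_pair {
        uint16_t type;
        uint16_t length;
        /* followed by `length` bytes of value */
};
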
 
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1378d9133844f08a369057608ed9d312bebc732f..1d60c655e3e0b70fc54ab5a48c2b6f3ffd01477c 100644 (file)
@@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
 extern int decode_negTokenInit(unsigned char *security_blob, int length,
                        struct TCP_Server_Info *server);
 extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
 extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
-                               unsigned short int port);
+                               const unsigned short int port);
 extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
                            const struct cifsTconInfo *, int /* length of
@@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
                          __u32 *);
 extern int cifs_verify_signature(struct smb_hdr *,
-                                struct TCP_Server_Info *server,
+                                const struct mac_key *mac_key,
                                __u32 expected_sequence_number);
-extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
+extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
                                 const char *pass);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
                             const struct nls_table *);
-extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
-extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct TCP_Server_Info *);
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern void calc_lanman_hash(const char *password, const char *cryptkey,
                                bool encrypt, char *lnm_session_key);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4bda920d1f754548ea705b94b13be9a1db321699..7e83b356cc9e3a93c2bc1b0e915d118884170474 100644 (file)
@@ -232,7 +232,7 @@ static int
 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
                void **request_buf)
 {
-       int rc = 0;
+       int rc;
 
        rc = cifs_reconnect_tcon(tcon, smb_command);
        if (rc)
@@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
 }
 
 int
@@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
-        void **request_buf /* returned */ ,
-        void **response_buf /* returned */ )
+__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
 {
-       int rc = 0;
-
-       rc = cifs_reconnect_tcon(tcon, smb_command);
-       if (rc)
-               return rc;
-
        *request_buf = cifs_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
@@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+        void **request_buf, void **response_buf)
+{
+       int rc;
+
+       rc = cifs_reconnect_tcon(tcon, smb_command);
+       if (rc)
+               return rc;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+}
+
+static int
+smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
+{
+       if (tcon->ses->need_reconnect || tcon->need_reconnect)
+               return -EHOSTDOWN;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
 }
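
QFSUnixInfo and SetFSUnixInfo are switched below to this variant because they are issued from within the reconnect path itself, where letting smb_init() trigger another reconnect could recurse; failing fast with -EHOSTDOWN breaks the loop. The shape of the split, as a generic sketch with stand-in types:

#include <errno.h>
#include <stdbool.h>

struct conn { bool need_reconnect; };

static int raw_init(struct conn *c) { (void)c; return 0; }

static int init_reconnecting(struct conn *c)
{
        if (c->need_reconnect) {
                /* ... re-establish the session/tree here ... */
                c->need_reconnect = false;
        }
        return raw_init(c);
}

static int init_no_reconnect(struct conn *c)
{
        if (c->need_reconnect)          /* called from reconnect itself */
                return -EHOSTDOWN;      /* fail fast instead of recursing */
        return raw_init(c);
}
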
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
@@ -604,14 +621,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        else
                                rc = -EINVAL;
 
-                       if (server->secType == Kerberos) {
-                               if (!server->sec_kerberos &&
-                                               !server->sec_mskerberos)
-                                       rc = -EOPNOTSUPP;
-                       } else if (server->secType == RawNTLMSSP) {
-                               if (!server->sec_ntlmssp)
-                                       rc = -EOPNOTSUPP;
-                       } else
+                       if (server->sec_kerberos || server->sec_mskerberos)
+                               server->secType = Kerberos;
+                       else if (server->sec_ntlmssp)
+                               server->secType = RawNTLMSSP;
+                       else
                                rc = -EOPNOTSUPP;
                }
        } else
@@ -4537,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
 
        cFYI(1, "In QFSUnixInfo");
 QFSUnixRetry:
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                  (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
@@ -4607,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
        cFYI(1, "In SETFSUnixInfo");
 SETFSUnixRetry:
        /* BB switch to small buf init to save memory */
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                       (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ec0ea4a43bdb4efc0f3734f439b6af78af97a2f4..88c84a38bccb182c313316a172bbae39ba2ee38f 100644 (file)
@@ -400,7 +400,9 @@ incomplete_rcv:
                        cFYI(1, "call to reconnect done");
                        csocket = server->ssocket;
                        continue;
-               } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+               } else if (length == -ERESTARTSYS ||
+                          length == -EAGAIN ||
+                          length == -EINTR) {
                        msleep(1); /* minimum sleep to prevent looping
                                allowing socket to clear and app threads to set
                                tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@ incomplete_rcv:
                        } else
                                continue;
                } else if (length <= 0) {
-                       if (server->tcpStatus == CifsNew) {
-                               cFYI(1, "tcp session abend after SMBnegprot");
-                               /* some servers kill the TCP session rather than
-                                  returning an SMB negprot error, in which
-                                  case reconnecting here is not going to help,
-                                  and so simply return error to mount */
-                               break;
-                       }
-                       if (!try_to_freeze() && (length == -EINTR)) {
-                               cFYI(1, "cifsd thread killed");
-                               break;
-                       }
                        cFYI(1, "Reconnect after unexpected peek error %d",
                                length);
                        cifs_reconnect(server);
@@ -466,27 +456,19 @@ incomplete_rcv:
                           an error on SMB negprot response */
                        cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
                                pdu_length);
-                       if (server->tcpStatus == CifsNew) {
-                               /* if nack on negprot (rather than
-                               ret of smb negprot error) reconnecting
-                               not going to help, ret error to mount */
-                               break;
-                       } else {
-                               /* give server a second to
-                               clean up before reconnect attempt */
-                               msleep(1000);
-                               /* always try 445 first on reconnect
-                               since we get NACK on some if we ever
-                               connected to port 139 (the NACK is
-                               since we do not begin with RFC1001
-                               session initialize frame) */
-                               server->addr.sockAddr.sin_port =
-                                       htons(CIFS_PORT);
-                               cifs_reconnect(server);
-                               csocket = server->ssocket;
-                               wake_up(&server->response_q);
-                               continue;
-                       }
+                       /* give server a second to clean up  */
+                       msleep(1000);
+                       /* always try 445 first on reconnect since we get NACK
+                        * on some if we ever connected to port 139 (the NACK
+                        * is since we do not begin with RFC1001 session
+                        * initialize frame)
+                        */
+                       cifs_set_port((struct sockaddr *)
+                                       &server->addr.sockAddr, CIFS_PORT);
+                       cifs_reconnect(server);
+                       csocket = server->ssocket;
+                       wake_up(&server->response_q);
+                       continue;
                } else if (temp != (char) 0) {
                        cERROR(1, "Unknown RFC 1002 frame");
                        cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@ incomplete_rcv:
                     total_read += length) {
                        length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
                                                pdu_length - total_read, 0);
-                       if ((server->tcpStatus == CifsExiting) ||
-                           (length == -EINTR)) {
+                       if (server->tcpStatus == CifsExiting) {
                                /* then will exit */
                                reconnect = 2;
                                break;
@@ -534,8 +515,9 @@ incomplete_rcv:
                                /* Now we will reread sock */
                                reconnect = 1;
                                break;
-                       } else if ((length == -ERESTARTSYS) ||
-                                  (length == -EAGAIN)) {
+                       } else if (length == -ERESTARTSYS ||
+                                  length == -EAGAIN ||
+                                  length == -EINTR) {
                                msleep(1); /* minimum sleep to prevent looping,
                                              allowing socket to clear and app
                                              threads to set tcpStatus
@@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
                CIFSSMBLogoff(xid, ses);
                _FreeXid(xid);
        }
-       cifs_crypto_shash_release(server);
        sesInfoFree(ses);
        cifs_put_tcp_session(server);
 }
@@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        if (ses) {
                cFYI(1, "Existing smb sess found (status=%d)", ses->status);
 
-               /* existing SMB ses has a server reference already */
-               cifs_put_tcp_session(server);
-
                mutex_lock(&ses->session_mutex);
                rc = cifs_negotiate_protocol(xid, ses);
                if (rc) {
@@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                        }
                }
                mutex_unlock(&ses->session_mutex);
+
+               /* existing SMB ses has a server reference already */
+               cifs_put_tcp_session(server);
                FreeXid(xid);
                return ses;
        }
@@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        ses->linux_uid = volume_info->linux_uid;
        ses->overrideSecFlg = volume_info->secFlg;
 
-       rc = cifs_crypto_shash_allocate(server);
-       if (rc) {
-               cERROR(1, "could not setup hash structures rc %d", rc);
-               goto get_ses_fail;
-       }
-       server->tilen = 0;
-       server->tiblob = NULL;
-
        mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(xid, ses);
        if (!rc)
                rc = cifs_setup_session(xid, ses, volume_info->local_nls);
        mutex_unlock(&ses->session_mutex);
-       if (rc) {
-               cifs_crypto_shash_release(ses->server);
+       if (rc)
                goto get_ses_fail;
-       }
 
        /* success, put it on the list */
        write_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 86a164f08a74a51399c2152799ec178c93902fa6..53cce8cc2224f4abe4754d2cc320a05a6f36d215 100644 (file)
@@ -801,6 +801,8 @@ retry_iget5_locked:
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
+                       if (S_ISREG(inode->i_mode))
+                               inode->i_data.backing_dev_info = sb->s_bdi;
 #ifdef CONFIG_CIFS_FSCACHE
                        /* initialize per-inode cache cookie pointer */
                        CIFS_I(inode)->fscache = NULL;
@@ -1462,28 +1464,17 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
 {
        char *fromName = NULL;
        char *toName = NULL;
-       struct cifs_sb_info *cifs_sb_source;
-       struct cifs_sb_info *cifs_sb_target;
+       struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;
        FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
        FILE_UNIX_BASIC_INFO *info_buf_target;
        int xid, rc, tmprc;
 
-       cifs_sb_target = CIFS_SB(target_dir->i_sb);
-       cifs_sb_source = CIFS_SB(source_dir->i_sb);
-       tcon = cifs_sb_source->tcon;
+       cifs_sb = CIFS_SB(source_dir->i_sb);
+       tcon = cifs_sb->tcon;
 
        xid = GetXid();
 
-       /*
-        * BB: this might be allowed if same server, but different share.
-        * Consider adding support for this
-        */
-       if (tcon != cifs_sb_target->tcon) {
-               rc = -EXDEV;
-               goto cifs_rename_exit;
-       }
-
        /*
         * we already have the rename sem so we do not need to
         * grab it again here to protect the path integrity
@@ -1519,17 +1510,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
                info_buf_target = info_buf_source + 1;
                tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
                                        info_buf_source,
-                                       cifs_sb_source->local_nls,
-                                       cifs_sb_source->mnt_cifs_flags &
+                                       cifs_sb->local_nls,
+                                       cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
                if (tmprc != 0)
                        goto unlink_target;
 
-               tmprc = CIFSSMBUnixQPathInfo(xid, tcon,
-                                       toName, info_buf_target,
-                                       cifs_sb_target->local_nls,
-                                       /* remap based on source sb */
-                                       cifs_sb_source->mnt_cifs_flags &
+               tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
+                                       info_buf_target,
+                                       cifs_sb->local_nls,
+                                       cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
 
                if (tmprc == 0 && (info_buf_source->UniqueId ==
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index f97851119e6c1b965530f57062b3ae97b53f4e00..9aad47a2d62f6d861035e1ce0b8b6876804aed61 100644 (file)
@@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
 }
 
 int
-cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
-                  const unsigned short int port)
+cifs_set_port(struct sockaddr *addr, const unsigned short int port)
 {
-       if (!cifs_convert_address(dst, src, len))
-               return 0;
-
-       switch (dst->sa_family) {
+       switch (addr->sa_family) {
        case AF_INET:
-               ((struct sockaddr_in *)dst)->sin_port = htons(port);
+               ((struct sockaddr_in *)addr)->sin_port = htons(port);
                break;
        case AF_INET6:
-               ((struct sockaddr_in6 *)dst)->sin6_port = htons(port);
+               ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
                break;
        default:
                return 0;
        }
-
        return 1;
 }
 
+int
+cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
+                  const unsigned short int port)
+{
+       if (!cifs_convert_address(dst, src, len))
+               return 0;
+       return cifs_set_port(dst, port);
+}
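
With the port setter factored out, cifs_fill_sockaddr() becomes convert-then-set, and the connect.c reader loop above can force the retry onto port 445 without poking at sockAddr fields directly. A userspace sketch of the same family-aware setter (192.0.2.7 is a documentation address):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int set_port(struct sockaddr *addr, unsigned short port)
{
        switch (addr->sa_family) {
        case AF_INET:
                ((struct sockaddr_in *)addr)->sin_port = htons(port);
                return 1;
        case AF_INET6:
                ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
                return 1;
        default:
                return 0;       /* unknown family, as in the kernel code */
        }
}

int main(void)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        inet_pton(AF_INET, "192.0.2.7", &sin.sin_addr);
        set_port((struct sockaddr *)&sin, 445);         /* retry port */
        printf("port %u\n", ntohs(sin.sin_port));
        return 0;
}
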
+
 /*****************************************************************************
 convert a NT status code to a dos class/code
  *****************************************************************************/
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 1db0f0746a5b4242f927e9203d8749711180918c..49c9a4e7531979c3e65615dd277ad4b0815ed0ae 100644 (file)
 #define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
 #define NTLMSSP_NEGOTIATE_56        0x80000000
 
-/* Define AV Pair Field IDs */
-#define NTLMSSP_AV_EOL                 0
-#define NTLMSSP_AV_NB_COMPUTER_NAME    1
-#define NTLMSSP_AV_NB_DOMAIN_NAME      2
-#define NTLMSSP_AV_DNS_COMPUTER_NAME   3
-#define NTLMSSP_AV_DNS_DOMAIN_NAME     4
-#define NTLMSSP_AV_DNS_TREE_NAME       5
-#define NTLMSSP_AV_FLAGS               6
-#define NTLMSSP_AV_TIMESTAMP           7
-#define NTLMSSP_AV_RESTRICTION         8
-#define NTLMSSP_AV_TARGET_NAME         9
-#define NTLMSSP_AV_CHANNEL_BINDINGS    10
-
 /* Although typedefs are not commonly used for structure definitions */
 /* in the Linux kernel, in this particular case they are useful      */
 /* to more closely match the standards document for NTLMSSP from     */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 795095f4eac69ba204257a597522e465dafac371..0a57cb7db5dd7554030e599cd111379e343083df 100644 (file)
@@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
 static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                                    struct cifsSesInfo *ses)
 {
-       unsigned int tioffset; /* challeng message target info area */
-       unsigned int tilen; /* challeng message target info area length  */
-
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
 
-       ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
-
-       tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
-       tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
-       ses->server->tilen = tilen;
-       if (tilen) {
-               ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
-               if (!ses->server->tiblob) {
-                       cERROR(1, "Challenge target info allocation failure");
-                       return -ENOMEM;
-               }
-               memcpy(ses->server->tiblob,  bcc_ptr + tioffset, tilen);
-       }
-
        return 0;
 }
 
@@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        /* BB is NTLMV2 session security format easier to use here? */
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NTLM;
+               NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
        if (ses->server->secMode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-               flags |= NTLMSSP_NEGOTIATE_SIGN |
-                       NTLMSSP_NEGOTIATE_KEY_XCH |
-                       NTLMSSP_NEGOTIATE_EXTENDED_SEC;
-       }
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+               flags |= NTLMSSP_NEGOTIATE_SIGN;
+       if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
 
        sec_blob->NegotiateFlags |= cpu_to_le32(flags);
 
@@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                                   struct cifsSesInfo *ses,
                                   const struct nls_table *nls_cp, bool first)
 {
-       int rc;
-       unsigned int size;
        AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
        __u32 flags;
        unsigned char *tmp;
-       struct ntlmv2_resp ntlmv2_response = {};
+       char ntlm_session_key[CIFS_SESS_KEY_SIZE];
 
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
@@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
-       if (rc) {
-               cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
-               goto setup_ntlmv2_ret;
-       }
-       size =  sizeof(struct ntlmv2_resp);
-       memcpy(tmp, (char *)&ntlmv2_response, size);
-       tmp += size;
-       if (ses->server->tilen > 0) {
-               memcpy(tmp, ses->server->tiblob, ses->server->tilen);
-               tmp += ses->server->tilen;
-       } else
-               ses->server->tilen = 0;
+       /* calculate session key, BB what about adding similar ntlmv2 path? */
+       SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
+       if (first)
+               cifs_calculate_mac_key(&ses->server->mac_signing_key,
+                                      ntlm_session_key, ses->password);
 
-       sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
-                               ses->server->tilen);
+       memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
+       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
        sec_blob->NtChallengeResponse.MaximumLength =
-               cpu_to_le16(size + ses->server->tilen);
+                               cpu_to_le16(CIFS_SESS_KEY_SIZE);
+
+       tmp += CIFS_SESS_KEY_SIZE;
 
        if (ses->domainName == NULL) {
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
+               len += 2; /* trailing null */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
+               len += 2; /* trailing null */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
 
-       if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
-                       !calc_seckey(ses->server)) {
-               memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-               sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.MaximumLength =
-                       cpu_to_le16(CIFS_CPHTXT_SIZE);
-               tmp += CIFS_CPHTXT_SIZE;
-       } else {
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-               sec_blob->SessionKey.Length = 0;
-               sec_blob->SessionKey.MaximumLength = 0;
-       }
-
-       ses->server->sequence_number = 0;
-
-setup_ntlmv2_ret:
-       if (ses->server->tilen > 0)
-               kfree(ses->server->tiblob);
-
+       sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->SessionKey.Length = 0;
+       sec_blob->SessionKey.MaximumLength = 0;
        return tmp - pbuffer;
 }
 
@@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
        return;
 }
 
-static int setup_ntlmssp_auth_req(char *ntlmsspblob,
+static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
                                  struct cifsSesInfo *ses,
                                  const struct nls_table *nls, bool first_time)
 {
        int bloblen;
 
-       bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
+       bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
                                          first_time);
+       pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
 
        return bloblen;
 }
@@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate:
 
                if (first_time) /* should this be moved into common code
                                  with similar ntlmv2 path? */
-                       cifs_calculate_session_key(&ses->server->session_key,
+                       cifs_calculate_mac_key(&ses->server->mac_signing_key,
                                ntlm_session_key, ses->password);
                /* copy session key */
 
@@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate:
                        cpu_to_le16(sizeof(struct ntlmv2_resp));
 
                /* calculate session key */
-               rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
-               if (rc) {
-                       kfree(v2_sess_key);
-                       goto ssetup_exit;
-               }
+               setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
                /* FIXME: calculate MAC key */
                memcpy(bcc_ptr, (char *)v2_sess_key,
                       sizeof(struct ntlmv2_resp));
                bcc_ptr += sizeof(struct ntlmv2_resp);
                kfree(v2_sess_key);
-               if (ses->server->tilen > 0) {
-                       memcpy(bcc_ptr, ses->server->tiblob,
-                               ses->server->tilen);
-                       bcc_ptr += ses->server->tilen;
-               }
                if (ses->capabilities & CAP_UNICODE) {
                        if (iov[0].iov_len % 2) {
                                *bcc_ptr = 0;
@@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate:
                }
                /* bail out if key is too long */
                if (msg->sesskey_len >
-                   sizeof(ses->server->session_key.data.krb5)) {
+                   sizeof(ses->server->mac_signing_key.data.krb5)) {
                        cERROR(1, "Kerberos signing key too long (%u bytes)",
                                msg->sesskey_len);
                        rc = -EOVERFLOW;
                        goto ssetup_exit;
                }
                if (first_time) {
-                       ses->server->session_key.len = msg->sesskey_len;
-                       memcpy(ses->server->session_key.data.krb5,
+                       ses->server->mac_signing_key.len = msg->sesskey_len;
+                       memcpy(ses->server->mac_signing_key.data.krb5,
                                msg->data, msg->sesskey_len);
                }
                pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate:
                        if (phase == NtLmNegotiate) {
                                setup_ntlmssp_neg_req(pSMB, ses);
                                iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
-                               iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        } else if (phase == NtLmAuthenticate) {
                                int blob_len;
-                               char *ntlmsspblob;
-
-                               ntlmsspblob = kmalloc(5 *
-                                       sizeof(struct _AUTHENTICATE_MESSAGE),
-                                       GFP_KERNEL);
-                               if (!ntlmsspblob) {
-                                       cERROR(1, "Can't allocate NTLMSSP");
-                                       rc = -ENOMEM;
-                                       goto ssetup_exit;
-                               }
-
-                               blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
-                                                               ses,
-                                                               nls_cp,
-                                                               first_time);
+                               blob_len = setup_ntlmssp_auth_req(pSMB, ses,
+                                                                 nls_cp,
+                                                                 first_time);
                                iov[1].iov_len = blob_len;
-                               iov[1].iov_base = ntlmsspblob;
-                               pSMB->req.SecurityBlobLength =
-                                       cpu_to_le16(blob_len);
                                /* Make sure that we tell the server that we
                                   are using the uid that it just gave us back
                                   on the response (challenge) */
@@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate:
                                rc = -ENOSYS;
                                goto ssetup_exit;
                        }
+                       iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        /* unicode strings must be word aligned */
                        if ((iov[0].iov_len + iov[1].iov_len) % 2) {
                                *bcc_ptr = 0;
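The session-setup hunks above go back to building the NTLMSSP authenticate blob directly in the SMB's security-blob area and recording its length in the request header, instead of staging it in a separately kmalloc'd buffer that then had to be tracked and freed. A sketch of the build-in-place pattern (struct and field names are invented for illustration):

#include <stdint.h>
#include <string.h>

/* Hypothetical wire message with a trailing variable-length blob. */
struct msg {
        uint16_t blob_len;       /* filled in after the blob is built */
        unsigned char blob[256]; /* blob is assembled in place */
};

/* Builder writes into the caller's buffer and returns the bytes used. */
static int build_blob(unsigned char *buf)
{
        static const char sig[8] = "NTLMSSP"; /* 8-byte signature */
        memcpy(buf, sig, sizeof(sig));
        return sizeof(sig);
}

static void setup_auth(struct msg *m)
{
        int len = build_blob(&m->blob[0]); /* no intermediate allocation */
        m->blob_len = (uint16_t)len;       /* header matches blob length */
}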
index e0588cdf4cc5d5a1e8a73c1190c21f2d6cbe4986..82f78c4d6978ceafdab5789182193b899a435202 100644 (file)
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(midQ->resp_buf,
-                                               ses->server,
+                                               &ses->server->mac_signing_key,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(out_buf,
-                                               ses->server,
+                                               &ses->server->mac_signing_key,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
            (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                     SECMODE_SIGN_ENABLED))) {
                rc = cifs_verify_signature(out_buf,
-                                          ses->server,
+                                          &ses->server->mac_signing_key,
                                           midQ->sequence_number+1);
                if (rc) {
                        cERROR(1, "Unexpected SMB signature");
index de89645777c7c2b06cb62b3657cd72b26a9c4f9e..116af7546cf0de6ef076e04ad74b4a2e9aa4bade 100644 (file)
@@ -184,8 +184,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
        }
 
        /* adjust outsize. is this useful ?? */
-        req->uc_outSize = nbytes;      
-        req->uc_flags |= REQ_WRITE;
+       req->uc_outSize = nbytes;
+       req->uc_flags |= CODA_REQ_WRITE;
        count = nbytes;
 
        /* Convert filedescriptor into a file handle */
index 718c7062aec129844cda361e2f4b8a143aa31861..0644a154672b93012a4d7a7f864ba7f505b43213 100644 (file)
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
 {
        compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
-       struct iovec *iov;
+       struct iovec *iov = iovstack;
        ssize_t ret;
        io_fn_t fn;
        iov_fn_t fnv;
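Initialising iov to the on-stack iovstack above matters because the function's cleanup (not shown in this hunk) frees iov only when it no longer points at the stack buffer; without the initialiser, an early bail-out could free an uninitialised pointer. The pattern, sketched standalone with illustrative names:

#include <stdlib.h>

#define FASTIOV 8

struct iovec_like { void *base; size_t len; };

static int process(unsigned long nr)
{
        struct iovec_like stackbuf[FASTIOV];
        struct iovec_like *iov = stackbuf; /* must start at the stack buffer */
        int ret = -1;

        if (nr > FASTIOV) {
                iov = malloc(nr * sizeof(*iov));
                if (!iov)
                        return -1;
        }

        /* ... validation that may bail out early would go here ... */
        ret = 0;

        if (iov != stackbuf) /* safe on every path: iov is never wild */
                free(iov);
        return ret;
}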
index 51f270b479b6938a4a730ea56f9011c30f99563f..48d74c7391d13f4f07393c45d19825e937ecbcd1 100644 (file)
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
        int ret = 0;
 
        if (dio->bio) {
-               loff_t cur_offset = dio->block_in_file << dio->blkbits;
+               loff_t cur_offset = dio->cur_page_fs_offset;
                loff_t bio_next_offset = dio->logical_offset_in_bio +
                        dio->bio->bi_size;
 
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
                 * Submit now if the underlying fs is about to perform a
                 * metadata read
                 */
-               if (dio->boundary)
+               else if (dio->boundary)
                        dio_bio_submit(dio);
        }
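The two direct-io hunks above make dio_send_cur_page() compare against the file offset recorded for the page itself (cur_page_fs_offset) rather than one recomputed from block_in_file, and turn the boundary submit into an else branch so the boundary hint no longer triggers a second submit right after a contiguity flush. The resulting decision logic, as a simplified sketch:

#include <stdbool.h>
#include <stdint.h>

struct dio_state {
        bool     have_bio;           /* a bio is currently open */
        uint64_t cur_page_fs_offset; /* file offset of the page to add */
        uint64_t bio_next_offset;    /* offset just past the open bio */
        bool     boundary;           /* fs hints a metadata read follows */
};

static void submit(struct dio_state *d) { d->have_bio = false; }

static void send_cur_page(struct dio_state *d)
{
        if (d->have_bio) {
                if (d->cur_page_fs_offset != d->bio_next_offset)
                        submit(d);   /* not contiguous: flush first */
                else if (d->boundary)
                        submit(d);   /* contiguous, but flush anyway */
        }
        /* ... then open or extend a bio for the current page ... */
}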
 
index 2d9455282744bce582e48e0ecec4f4a6d332a28c..6d2b6f93685813ba2b2119dc71c14a941061cf46 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max)
                        argv++;
                        if (i++ >= max)
                                return -E2BIG;
+
+                       if (fatal_signal_pending(current))
+                               return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv,
                while (len > 0) {
                        int offset, bytes_to_copy;
 
+                       if (fatal_signal_pending(current)) {
+                               ret = -ERESTARTNOHAND;
+                               goto out;
+                       }
+                       cond_resched();
+
                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
+
+       if (unlikely(stack_top < mmap_min_addr) ||
+           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+               return -ENOMEM;
+
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
@@ -2000,3 +2014,43 @@ fail_creds:
 fail:
        return;
 }
+
+/*
+ * Core dumping helper functions.  These are the only things you should
+ * do on a core-file: use only these functions to write out all the
+ * necessary info.
+ */
+int dump_write(struct file *file, const void *addr, int nr)
+{
+       return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+EXPORT_SYMBOL(dump_write);
+
+int dump_seek(struct file *file, loff_t off)
+{
+       int ret = 1;
+
+       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
+                       return 0;
+       } else {
+               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
+
+               if (!buf)
+                       return 0;
+               while (off > 0) {
+                       unsigned long n = off;
+
+                       if (n > PAGE_SIZE)
+                               n = PAGE_SIZE;
+                       if (!dump_write(file, buf, n)) {
+                               ret = 0;
+                               break;
+                       }
+                       off -= n;
+               }
+               free_page((unsigned long)buf);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(dump_seek);
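dump_seek() above prefers a real llseek, which leaves a sparse hole in the core file, and falls back to writing zero-filled pages one at a time when the file cannot seek. A userspace rendition of the same fallback (buffer size and names chosen for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Skip 'off' bytes of output: seek if possible, else emit zeros. */
static int skip_bytes(FILE *f, long off)
{
        char *buf;

        if (fseek(f, off, SEEK_CUR) == 0)
                return 1;          /* seekable: leave a hole */

        buf = calloc(1, 4096);     /* one zeroed "page" */
        if (!buf)
                return 0;
        while (off > 0) {
                size_t n = off > 4096 ? 4096 : (size_t)off;

                if (fwrite(buf, 1, n, f) != n) {
                        free(buf);
                        return 0;
                }
                off -= n;
        }
        free(buf);
        return 1;
}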
index eb7368ebd8cdc294c7ec4b2d1ad0d4953f699981..3eadd97324b140e679f269480b3737823fd008cb 100644 (file)
@@ -54,6 +54,9 @@ struct page_collect {
        unsigned nr_pages;
        unsigned long length;
        loff_t pg_first; /* keep 64bit also in 32-arches */
+       bool read_4_write; /* This means two things: that the read is sync
+                           * and the pages should not be unlocked.
+                           */
 };
 
 static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
@@ -71,6 +74,7 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
        pcol->nr_pages = 0;
        pcol->length = 0;
        pcol->pg_first = -1;
+       pcol->read_4_write = false;
 }
 
 static void _pcol_reset(struct page_collect *pcol)
@@ -347,7 +351,8 @@ static int readpage_strip(void *data, struct page *page)
                if (PageError(page))
                        ClearPageError(page);
 
-               unlock_page(page);
+               if (!pcol->read_4_write)
+                       unlock_page(page);
                EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
                             " splitting\n", inode->i_ino, page->index);
 
@@ -428,6 +433,7 @@ static int _readpage(struct page *page, bool is_sync)
        /* readpage_strip might call read_exec(,is_sync==false) at several
         * places but not if we have a single page.
         */
+       pcol.read_4_write = is_sync;
        ret = readpage_strip(&pcol, page);
        if (ret) {
                EXOFS_ERR("_readpage => %d\n", ret);
index 6769fd0f35b88373fdb8d0b265668a976a7ab251..f8cc34f542c3a1cc8b4f53309ffae317ecdddf15 100644 (file)
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
 
 static int __init fcntl_init(void)
 {
-       /* please add new bits here to ensure allocation uniqueness */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       /*
+        * Please add new bits here to ensure allocation uniqueness.
+        * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+        * is defined as O_NONBLOCK on some platforms and not on others.
+        */
+       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
-               O_TRUNC         | O_APPEND      | O_NONBLOCK    |
+               O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
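The assertion above works because OR-ing together flags that each occupy a distinct bit yields a word whose popcount equals the number of flags; any accidental overlap makes the count come up short at build time. O_NONBLOCK is now commented out of the OR (and the expected count dropped from 19 to 18) because it cannot satisfy the one-flag-one-bit rule on every architecture. A compile-time sketch of the technique, with made-up flags, relying on GCC/Clang constant-folding the popcount builtin:

/* Hypothetical flag set; each value must be a distinct single bit. */
#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x04

/* Popcount of the OR equals the flag count iff no two flags collide. */
_Static_assert(__builtin_popcount(FLAG_A | FLAG_B | FLAG_C) == 3,
               "flag bits overlap");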
index 7d9d06ba184b409e2ae90050ce82ca365cf3ce82..ab38fef1c9a1a52eab7128fa6a7dad217d4ad744 100644 (file)
@@ -52,8 +52,6 @@ struct wb_writeback_work {
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-#define inode_to_bdi(inode)    ((inode)->i_mapping->backing_dev_info)
-
 /*
  * We don't actually have pdflush, but this one is exported though /proc...
  */
@@ -71,6 +69,16 @@ int writeback_in_progress(struct backing_dev_info *bdi)
        return test_bit(BDI_writeback_running, &bdi->state);
 }
 
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       if (strcmp(sb->s_type->name, "bdev") == 0)
+               return inode->i_mapping->backing_dev_info;
+
+       return sb->s_bdi;
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
                struct wb_writeback_work *work)
 {
@@ -808,7 +816,7 @@ int bdi_writeback_thread(void *data)
                        wb->last_active = jiffies;
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (!list_empty(&bdi->work_list)) {
+               if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
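Checking kthread_should_stop() above, after marking the task interruptible but before actually scheduling, closes the window where a stop request that arrives just ahead of the sleep would leave the writeback thread dozing until its next wakeup. The canonical shape of such a loop, as a kernel-style sketch in which do_pending_work() and have_pending_work() are placeholders:

static int worker_thread(void *data)
{
        while (!kthread_should_stop()) {
                do_pending_work();

                set_current_state(TASK_INTERRUPTIBLE);
                /* Re-check for both work and stop: either may have
                 * arrived since the checks above. */
                if (have_pending_work() || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
                schedule();
        }
        return 0;
}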
index 69ad053ffd78cb0f2669516b5327571f37d65254..cde755cca5642d41fb9cbbe05ac2d01f70f53c69 100644 (file)
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
-       size_t total_len;
+       size_t total_len = 0;
 
        req = fuse_get_req(fc);
        if (IS_ERR(req))
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
        }
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+       fc->max_background = UINT_MAX;
+       flush_bg_queue(fc);
+       end_requests(fc, &fc->pending);
+       end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               end_queued_requests(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               fc->blocked = 0;
+               end_queued_requests(fc);
+               wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
index 147c1f71bdb9f0213307fd3e63f6e3b30fc3f403..c8224587123f6e2ff84c8933f8d56a50ffd06c80 100644 (file)
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
index cc9665522148a730b010953596cc24edefc00ed6..c465ae066c62c6392ee22047d2be0ec650b89c98 100644 (file)
@@ -1,6 +1,6 @@
 config GFS2_FS
        tristate "GFS2 file system support"
-       depends on EXPERIMENTAL && (64BIT || LBDAF)
+       depends on (64BIT || LBDAF)
        select DLM if GFS2_FS_LOCKING_DLM
        select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
        select SYSFS if GFS2_FS_LOCKING_DLM
index 194fe16d8418a332a274a74769b15277ff2d6858..6b24afb96aaedade304b48bb427e664eae8e6e53 100644 (file)
@@ -36,8 +36,8 @@
 #include "glops.h"
 
 
-static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
-                                  unsigned int from, unsigned int to)
+void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+                           unsigned int from, unsigned int to)
 {
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
@@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        int alloc_required;
        int error = 0;
-       struct gfs2_alloc *al;
+       struct gfs2_alloc *al = NULL;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned to = from + len;
@@ -663,6 +663,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
+       if (alloc_required)
+               rblocks += gfs2_rg_blocks(al);
 
        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -696,13 +698,11 @@ out:
 
        page_cache_release(page);
 
-       /*
-        * XXX(truncate): the call below should probably be replaced with
-        * a call to the gfs2-specific truncate blocks helper to actually
-        * release disk blocks..
-        */
+       gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
-               truncate_setsize(&ip->i_inode, ip->i_inode.i_size);
+               gfs2_trim_blocks(&ip->i_inode);
+       goto out_trans_fail;
+
 out_endtrans:
        gfs2_trans_end(sdp);
 out_trans_fail:
@@ -802,10 +802,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
        page_cache_release(page);
 
        if (copied) {
-               if (inode->i_size < to) {
+               if (inode->i_size < to)
                        i_size_write(inode, to);
-                       ip->i_disksize = inode->i_size;
-               }
                gfs2_dinode_out(ip, di);
                mark_inode_dirty(inode);
        }
@@ -876,8 +874,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 
        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret > 0) {
-               if (inode->i_size > ip->i_disksize)
-                       ip->i_disksize = inode->i_size;
                gfs2_dinode_out(ip, dibh->b_data);
                mark_inode_dirty(inode);
        }
index 6f482809d1a35b4787e9cb62357958d532aeaa30..5476c066d4ee336733445eda2f804561179ecb41 100644 (file)
@@ -50,7 +50,7 @@ struct strip_mine {
  * @ip: the inode
  * @dibh: the dinode buffer
  * @block: the block number that was allocated
- * @private: any locked page held by the caller process
+ * @page: The (optional) page. This is looked up if @page is NULL
  *
  * Returns: errno
  */
@@ -109,8 +109,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
 /**
  * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
  * @ip: The GFS2 inode to unstuff
- * @unstuffer: the routine that handles unstuffing a non-zero length file
- * @private: private data for the unstuffer
+ * @page: The (optional) page. This is looked up if @page is NULL
  *
  * This routine unstuffs a dinode and returns it to a "normal" state such
  * that the height can be grown in the traditional way.
@@ -132,7 +131,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
        if (error)
                goto out;
 
-       if (ip->i_disksize) {
+       if (i_size_read(&ip->i_inode)) {
                /* Get a free block, fill it with the stuffed data,
                   and write it out to disk */
 
@@ -161,7 +160,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
        di = (struct gfs2_dinode *)dibh->b_data;
        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
-       if (ip->i_disksize) {
+       if (i_size_read(&ip->i_inode)) {
                *(__be64 *)(di + 1) = cpu_to_be64(block);
                gfs2_add_inode_blocks(&ip->i_inode, 1);
                di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -884,84 +883,15 @@ out:
        return error;
 }
 
-/**
- * do_grow - Make a file look bigger than it is
- * @ip: the inode
- * @size: the size to set the file to
- *
- * Called with an exclusive lock on @ip.
- *
- * Returns: errno
- */
-
-static int do_grow(struct gfs2_inode *ip, u64 size)
-{
-       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-       struct gfs2_alloc *al;
-       struct buffer_head *dibh;
-       int error;
-
-       al = gfs2_alloc_get(ip);
-       if (!al)
-               return -ENOMEM;
-
-       error = gfs2_quota_lock_check(ip);
-       if (error)
-               goto out;
-
-       al->al_requested = sdp->sd_max_height + RES_DATA;
-
-       error = gfs2_inplace_reserve(ip);
-       if (error)
-               goto out_gunlock_q;
-
-       error = gfs2_trans_begin(sdp,
-                       sdp->sd_max_height + al->al_rgd->rd_length +
-                       RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0);
-       if (error)
-               goto out_ipres;
-
-       error = gfs2_meta_inode_buffer(ip, &dibh);
-       if (error)
-               goto out_end_trans;
-
-       if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
-               if (gfs2_is_stuffed(ip)) {
-                       error = gfs2_unstuff_dinode(ip, NULL);
-                       if (error)
-                               goto out_brelse;
-               }
-       }
-
-       ip->i_disksize = size;
-       ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-       gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(ip, dibh->b_data);
-
-out_brelse:
-       brelse(dibh);
-out_end_trans:
-       gfs2_trans_end(sdp);
-out_ipres:
-       gfs2_inplace_release(ip);
-out_gunlock_q:
-       gfs2_quota_unlock(ip);
-out:
-       gfs2_alloc_put(ip);
-       return error;
-}
-
-
 /**
  * gfs2_block_truncate_page - Deal with zeroing out data for truncate
  *
  * This is partly borrowed from ext3.
  */
-static int gfs2_block_truncate_page(struct address_space *mapping)
+static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 {
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       loff_t from = inode->i_size;
        unsigned long index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize, iblock, length, pos;
@@ -1023,9 +953,11 @@ unlock:
        return err;
 }
 
-static int trunc_start(struct gfs2_inode *ip, u64 size)
+static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
 {
-       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
        struct buffer_head *dibh;
        int journaled = gfs2_is_jdata(ip);
        int error;
@@ -1039,31 +971,26 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
        if (error)
                goto out;
 
+       gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
        if (gfs2_is_stuffed(ip)) {
-               u64 dsize = size + sizeof(struct gfs2_dinode);
-               ip->i_disksize = size;
-               ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-               gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(ip, dibh->b_data);
-               if (dsize > dibh->b_size)
-                       dsize = dibh->b_size;
-               gfs2_buffer_clear_tail(dibh, dsize);
-               error = 1;
+               gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
        } else {
-               if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
-                       error = gfs2_block_truncate_page(ip->i_inode.i_mapping);
-
-               if (!error) {
-                       ip->i_disksize = size;
-                       ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-                       ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
-                       gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-                       gfs2_dinode_out(ip, dibh->b_data);
+               if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
+                       error = gfs2_block_truncate_page(mapping, newsize);
+                       if (error)
+                               goto out_brelse;
                }
+               ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
        }
 
-       brelse(dibh);
+       i_size_write(inode, newsize);
+       ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+       gfs2_dinode_out(ip, dibh->b_data);
 
+       truncate_pagecache(inode, oldsize, newsize);
+out_brelse:
+       brelse(dibh);
 out:
        gfs2_trans_end(sdp);
        return error;
@@ -1123,7 +1050,7 @@ static int trunc_end(struct gfs2_inode *ip)
        if (error)
                goto out;
 
-       if (!ip->i_disksize) {
+       if (!i_size_read(&ip->i_inode)) {
                ip->i_height = 0;
                ip->i_goal = ip->i_no_addr;
                gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
@@ -1143,92 +1070,154 @@ out:
 
 /**
  * do_shrink - make a file smaller
- * @ip: the inode
- * @size: the size to make the file
- * @truncator: function to truncate the last partial block
+ * @inode: the inode
+ * @oldsize: the current inode size
+ * @newsize: the size to make the file
  *
- * Called with an exclusive lock on @ip.
+ * Called with an exclusive lock on @inode. @newsize must
+ * be equal to or smaller than the current inode size.
  *
  * Returns: errno
  */
 
-static int do_shrink(struct gfs2_inode *ip, u64 size)
+static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
 {
+       struct gfs2_inode *ip = GFS2_I(inode);
        int error;
 
-       error = trunc_start(ip, size);
+       error = trunc_start(inode, oldsize, newsize);
        if (error < 0)
                return error;
-       if (error > 0)
+       if (gfs2_is_stuffed(ip))
                return 0;
 
-       error = trunc_dealloc(ip, size);
-       if (!error)
+       error = trunc_dealloc(ip, newsize);
+       if (error == 0)
                error = trunc_end(ip);
 
        return error;
 }
 
-static int do_touch(struct gfs2_inode *ip, u64 size)
+void gfs2_trim_blocks(struct inode *inode)
 {
-       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+       u64 size = inode->i_size;
+       int ret;
+
+       ret = do_shrink(inode, size, size);
+       WARN_ON(ret != 0);
+}
+
+/**
+ * do_grow - Touch and update inode size
+ * @inode: The inode
+ * @size: The new size
+ *
+ * This function updates the timestamps on the inode and
+ * may also increase the size of the inode. This function
+ * must not be called with @size any smaller than the current
+ * inode size.
+ *
+ * Although it is not strictly required to unstuff files here,
+ * earlier versions of GFS2 have a bug in the stuffed file reading
+ * code which will result in a buffer overrun if the size is larger
+ * than the max stuffed file size. In order to prevent this from
+ * occurring, such files are unstuffed, but in other cases we can
+ * just update the inode size directly.
+ *
+ * Returns: 0 on success, or -ve on error
+ */
+
+static int do_grow(struct inode *inode, u64 size)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *dibh;
+       struct gfs2_alloc *al = NULL;
        int error;
 
-       error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+       if (gfs2_is_stuffed(ip) &&
+           (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
+               al = gfs2_alloc_get(ip);
+               if (al == NULL)
+                       return -ENOMEM;
+
+               error = gfs2_quota_lock_check(ip);
+               if (error)
+                       goto do_grow_alloc_put;
+
+               al->al_requested = 1;
+               error = gfs2_inplace_reserve(ip);
+               if (error)
+                       goto do_grow_qunlock;
+       }
+
+       error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
        if (error)
-               return error;
+               goto do_grow_release;
 
-       down_write(&ip->i_rw_mutex);
+       if (al) {
+               error = gfs2_unstuff_dinode(ip, NULL);
+               if (error)
+                       goto do_end_trans;
+       }
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
-               goto do_touch_out;
+               goto do_end_trans;
 
+       i_size_write(inode, size);
        ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 
-do_touch_out:
-       up_write(&ip->i_rw_mutex);
+do_end_trans:
        gfs2_trans_end(sdp);
+do_grow_release:
+       if (al) {
+               gfs2_inplace_release(ip);
+do_grow_qunlock:
+               gfs2_quota_unlock(ip);
+do_grow_alloc_put:
+               gfs2_alloc_put(ip);
+       }
        return error;
 }
 
 /**
- * gfs2_truncatei - make a file a given size
- * @ip: the inode
- * @size: the size to make the file
- * @truncator: function to truncate the last partial block
+ * gfs2_setattr_size - make a file a given size
+ * @inode: the inode
+ * @newsize: the size to make the file
  *
- * The file size can grow, shrink, or stay the same size.
+ * The file size can grow, shrink, or stay the same size. This
+ * is called holding i_mutex and an exclusive glock on the inode
+ * in question.
  *
  * Returns: errno
  */
 
-int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
+int gfs2_setattr_size(struct inode *inode, u64 newsize)
 {
-       int error;
+       int ret;
+       u64 oldsize;
 
-       if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
-               return -EINVAL;
+       BUG_ON(!S_ISREG(inode->i_mode));
 
-       if (size > ip->i_disksize)
-               error = do_grow(ip, size);
-       else if (size < ip->i_disksize)
-               error = do_shrink(ip, size);
-       else
-               /* update time stamps */
-               error = do_touch(ip, size);
+       ret = inode_newsize_ok(inode, newsize);
+       if (ret)
+               return ret;
 
-       return error;
+       oldsize = inode->i_size;
+       if (newsize >= oldsize)
+               return do_grow(inode, newsize);
+
+       return do_shrink(inode, oldsize, newsize);
 }
 
 int gfs2_truncatei_resume(struct gfs2_inode *ip)
 {
        int error;
-       error = trunc_dealloc(ip, ip->i_disksize);
+       error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
        if (!error)
                error = trunc_end(ip);
        return error;
@@ -1269,7 +1258,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
 
        shift = sdp->sd_sb.sb_bsize_shift;
        BUG_ON(gfs2_is_dir(ip));
-       end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
+       end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
        lblock = offset >> shift;
        lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
        if (lblock_stop > end_of_file)
index a20a5213135a50ae9293da676eb533e9c04063db..42fea03e2bd962b6674747967ac00499009d923f 100644 (file)
@@ -44,14 +44,16 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
        }
 }
 
-int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
-int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create);
-int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen);
-
-int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
-int gfs2_truncatei_resume(struct gfs2_inode *ip);
-int gfs2_file_dealloc(struct gfs2_inode *ip);
-int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
-                             unsigned int len);
+extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_block_map(struct inode *inode, sector_t lblock,
+                         struct buffer_head *bh, int create);
+extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
+                          u64 *dblock, unsigned *extlen);
+extern int gfs2_setattr_size(struct inode *inode, u64 size);
+extern void gfs2_trim_blocks(struct inode *inode);
+extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
+extern int gfs2_file_dealloc(struct gfs2_inode *ip);
+extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+                                    unsigned int len);
 
 #endif /* __BMAP_DOT_H__ */
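Much of the gfs2 churn above replaces the private ip->i_disksize field with the VFS inode size, read through i_size_read() and updated with i_size_write(). Those helpers exist because a 64-bit loff_t cannot be loaded atomically on 32-bit SMP kernels, so the access is bracketed by a sequence counter and retried. A deliberately simplified userspace sketch of that retry-read idiom (no memory barriers, single writer assumed, demonstration only):

#include <stdint.h>

struct sized {
        unsigned seq;   /* odd while an update is in progress */
        uint64_t size;
};

static void size_write(struct sized *s, uint64_t v)
{
        s->seq++;       /* odd: update in progress */
        s->size = v;
        s->seq++;       /* even: update complete */
}

static uint64_t size_read(const struct sized *s)
{
        unsigned start;
        uint64_t v;

        do {
                start = s->seq;
                v = s->size;
        } while ((start & 1) || start != s->seq);
        return v;
}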
index bb7907bde3d81b63b6ae5b198283a52db36e97b0..6798755b3858685b611da5e439393bcffb332563 100644 (file)
@@ -49,7 +49,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
                ip = GFS2_I(inode);
        }
 
-       if (sdp->sd_args.ar_localcaching)
+       if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto valid;
 
        had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
index b9dd88a78dd47073e3af1645a1e0fa3bcb94d1bb..5c356d09c321c10133afc7cf93aba2eddd1cb3c1 100644 (file)
@@ -79,6 +79,9 @@
 #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
 #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
 
+struct qstr gfs2_qdot __read_mostly;
+struct qstr gfs2_qdotdot __read_mostly;
+
 typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
                            u64 leaf_no, void *data);
 typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
@@ -127,8 +130,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
-       if (ip->i_disksize < offset + size)
-               ip->i_disksize = offset + size;
+       if (ip->i_inode.i_size < offset + size)
+               i_size_write(&ip->i_inode, offset + size);
        ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
        gfs2_dinode_out(ip, dibh->b_data);
 
@@ -225,8 +228,8 @@ out:
        if (error)
                return error;
 
-       if (ip->i_disksize < offset + copied)
-               ip->i_disksize = offset + copied;
+       if (ip->i_inode.i_size < offset + copied)
+               i_size_write(&ip->i_inode, offset + copied);
        ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
@@ -275,12 +278,13 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
        unsigned int o;
        int copied = 0;
        int error = 0;
+       u64 disksize = i_size_read(&ip->i_inode);
 
-       if (offset >= ip->i_disksize)
+       if (offset >= disksize)
                return 0;
 
-       if (offset + size > ip->i_disksize)
-               size = ip->i_disksize - offset;
+       if (offset + size > disksize)
+               size = disksize - offset;
 
        if (!size)
                return 0;
@@ -727,7 +731,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
                unsigned hsize = 1 << ip->i_depth;
                unsigned index;
                u64 ln;
-               if (hsize * sizeof(u64) != ip->i_disksize) {
+               if (hsize * sizeof(u64) != i_size_read(inode)) {
                        gfs2_consist_inode(ip);
                        return ERR_PTR(-EIO);
                }
@@ -879,7 +883,7 @@ static int dir_make_exhash(struct inode *inode)
        for (x = sdp->sd_hash_ptrs; x--; lp++)
                *lp = cpu_to_be64(bn);
 
-       dip->i_disksize = sdp->sd_sb.sb_bsize / 2;
+       i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
        gfs2_add_inode_blocks(&dip->i_inode, 1);
        dip->i_diskflags |= GFS2_DIF_EXHASH;
 
@@ -1057,11 +1061,12 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        u64 *buf;
        u64 *from, *to;
        u64 block;
+       u64 disksize = i_size_read(&dip->i_inode);
        int x;
        int error = 0;
 
        hsize = 1 << dip->i_depth;
-       if (hsize * sizeof(u64) != dip->i_disksize) {
+       if (hsize * sizeof(u64) != disksize) {
                gfs2_consist_inode(dip);
                return -EIO;
        }
@@ -1072,7 +1077,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        if (!buf)
                return -ENOMEM;
 
-       for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
+       for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) {
                error = gfs2_dir_read_data(dip, (char *)buf,
                                            block * sdp->sd_hash_bsize,
                                            sdp->sd_hash_bsize, 1);
@@ -1370,7 +1375,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
        unsigned depth = 0;
 
        hsize = 1 << dip->i_depth;
-       if (hsize * sizeof(u64) != dip->i_disksize) {
+       if (hsize * sizeof(u64) != i_size_read(inode)) {
                gfs2_consist_inode(dip);
                return -EIO;
        }
@@ -1784,7 +1789,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
        int error = 0;
 
        hsize = 1 << dip->i_depth;
-       if (hsize * sizeof(u64) != dip->i_disksize) {
+       if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
                gfs2_consist_inode(dip);
                return -EIO;
        }
index 4f919440c3be3e20ed49c2acb742db6758096bea..a98f644bd3df33596cf2382767b89ca0cdd08161 100644 (file)
@@ -17,23 +17,24 @@ struct inode;
 struct gfs2_inode;
 struct gfs2_inum;
 
-struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *filename);
-int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
-                  const struct gfs2_inode *ip);
-int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
-                const struct gfs2_inode *ip, unsigned int type);
-int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
-int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
-                 filldir_t filldir);
-int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-                  const struct gfs2_inode *nip, unsigned int new_type);
+extern struct inode *gfs2_dir_search(struct inode *dir,
+                                    const struct qstr *filename);
+extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+                         const struct gfs2_inode *ip);
+extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+                       const struct gfs2_inode *ip, unsigned int type);
+extern int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
+extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
+                        filldir_t filldir);
+extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+                         const struct gfs2_inode *nip, unsigned int new_type);
 
-int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
 
-int gfs2_diradd_alloc_required(struct inode *dir,
-                              const struct qstr *filename);
-int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
-                           struct buffer_head **bhp);
+extern int gfs2_diradd_alloc_required(struct inode *dir,
+                                     const struct qstr *filename);
+extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+                                  struct buffer_head **bhp);
 
 static inline u32 gfs2_disk_hash(const char *data, int len)
 {
@@ -61,4 +62,7 @@ static inline void gfs2_qstr2dirent(const struct qstr *name, u16 reclen, struct
        memcpy(dent + 1, name->name, name->len);
 }
 
+extern struct qstr gfs2_qdot;
+extern struct qstr gfs2_qdotdot;
+
 #endif /* __DIR_DOT_H__ */
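The gfs2_qdot and gfs2_qdotdot externs above let gfs2_get_parent() (in the export.c hunk below) reuse one preparsed ".." name instead of rebuilding a struct qstr on every call, which is exactly what the removed XXX(hch) comment asked for. A sketch of the one-time initialisation these declarations imply (the actual init site is not part of the hunks shown here):

struct qstr_like {
        const char *name;
        unsigned int len;
};

static struct qstr_like qdotdot;

static void names_init(void)
{
        qdotdot.name = "..";   /* parse once at init ... */
        qdotdot.len = 2;       /* ... reuse on every lookup */
}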
index dfe237a3f8ad9e2a0f11bae403ff1f1d8687cbd0..06d582732d3427a058864d03ca6cb667c5063481 100644 (file)
@@ -126,16 +126,9 @@ static int gfs2_get_name(struct dentry *parent, char *name,
 
 static struct dentry *gfs2_get_parent(struct dentry *child)
 {
-       struct qstr dotdot;
        struct dentry *dentry;
 
-       /*
-        * XXX(hch): it would be a good idea to keep this around as a
-        *           static variable.
-        */
-       gfs2_str2qstr(&dotdot, "..");
-
-       dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &dotdot, 1));
+       dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
        if (!IS_ERR(dentry))
                dentry->d_op = &gfs2_dops;
        return dentry;
index 4edd662c8232b2c24d1f1767b937f258071f8211..237ee6a940df23ab0720ed99252657611d0be7ec 100644 (file)
@@ -382,8 +382,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
-       if (ind_blocks || data_blocks)
+       if (ind_blocks || data_blocks) {
                rblocks += RES_STATFS + RES_QUOTA;
+               rblocks += gfs2_rg_blocks(al);
+       }
        ret = gfs2_trans_begin(sdp, rblocks, 0);
        if (ret)
                goto out_trans_fail;
@@ -491,7 +493,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
                        goto fail;
 
                if (!(file->f_flags & O_LARGEFILE) &&
-                   ip->i_disksize > MAX_NON_LFS) {
+                   i_size_read(inode) > MAX_NON_LFS) {
                        error = -EOVERFLOW;
                        goto fail_gunlock;
                }
index 9adf8f924e08991c32d12938702e570f163c3215..87778857f0994fa504224d93c7c5611d10bc1e8c 100644 (file)
@@ -441,6 +441,8 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
                else
                        gfs2_glock_put_nolock(gl);
        }
+       if (held1 && held2 && list_empty(&gl->gl_holders))
+               clear_bit(GLF_QUEUED, &gl->gl_flags);
 
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
@@ -1012,6 +1014,7 @@ fail:
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
+       set_bit(GLF_QUEUED, &gl->gl_flags);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
@@ -1310,10 +1313,12 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 
        gfs2_glock_hold(gl);
        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
-       if (time_before(now, holdtime))
-               delay = holdtime - now;
-       if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
-               delay = gl->gl_ops->go_min_hold_time;
+       if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
+               if (time_before(now, holdtime))
+                       delay = holdtime - now;
+               if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+                       delay = gl->gl_ops->go_min_hold_time;
+       }
 
        spin_lock(&gl->gl_spin);
        handle_callback(gl, state, delay);
@@ -1512,7 +1517,7 @@ static void clear_glock(struct gfs2_glock *gl)
        spin_unlock(&lru_lock);
 
        spin_lock(&gl->gl_spin);
-       if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
+       if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_hold(gl);
@@ -1660,6 +1665,8 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
+       if (test_bit(GLF_QUEUED, gflags))
+               *p++ = 'q';
        *p = 0;
        return buf;
 }
@@ -1776,10 +1783,12 @@ int __init gfs2_glock_init(void)
        }
 #endif
 
-       glock_workqueue = create_workqueue("glock_workqueue");
+       glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER |
+                                         WQ_HIGHPRI | WQ_FREEZEABLE, 0);
        if (IS_ERR(glock_workqueue))
                return PTR_ERR(glock_workqueue);
-       gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+       gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER |
+                                               WQ_FREEZEABLE, 0);
        if (IS_ERR(gfs2_delete_workqueue)) {
                destroy_workqueue(glock_workqueue);
                return PTR_ERR(gfs2_delete_workqueue);
index 2bda1911b1563347b52d4d2da7892768d93bd947..db1c26d6d2206c8f9e9b68396380ed8791f3c720 100644 (file)
@@ -215,7 +215,7 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
- * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
+ * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
  * @gl: the glock
  * @state: the state we're requesting
  * @flags: the modifier flags
index 49f97d3bb690c512cb0f338d85938e622d501a49..0d149dcc04e515adfaaeb632a6677e5e3b555f45 100644 (file)
@@ -262,13 +262,12 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
        const struct gfs2_inode *ip = gl->gl_object;
        if (ip == NULL)
                return 0;
-       gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
+       gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr,
                  IF2DT(ip->i_inode.i_mode), ip->i_flags,
                  (unsigned int)ip->i_diskflags,
-                 (unsigned long long)ip->i_inode.i_size,
-                 (unsigned long long)ip->i_disksize);
+                 (unsigned long long)i_size_read(&ip->i_inode));
        return 0;
 }
 
@@ -453,7 +452,6 @@ const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
-       [LM_TYPE_NONDISK] = &gfs2_trans_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
index fdbf4b366fa540d295dcbd73b298099b50418f8c..764fbb49efc8e3adbdeda7f83f178b0fd6ea70f8 100644 (file)
@@ -196,6 +196,7 @@ enum {
        GLF_REPLY_PENDING               = 9,
        GLF_INITIAL                     = 10,
        GLF_FROZEN                      = 11,
+       GLF_QUEUED                      = 12,
 };
 
 struct gfs2_glock {
@@ -267,7 +268,6 @@ struct gfs2_inode {
        u64 i_no_formal_ino;
        u64 i_generation;
        u64 i_eattr;
-       loff_t i_disksize;
        unsigned long i_flags;          /* GIF_... */
        struct gfs2_glock *i_gl; /* Move into i_gh? */
        struct gfs2_holder i_iopen_gh;
@@ -416,11 +416,8 @@ struct gfs2_args {
        char ar_locktable[GFS2_LOCKNAME_LEN];   /* Name of the Lock Table */
        char ar_hostdata[GFS2_LOCKNAME_LEN];    /* Host specific data */
        unsigned int ar_spectator:1;            /* Don't get a journal */
-       unsigned int ar_ignore_local_fs:1;      /* Ignore optimisations */
        unsigned int ar_localflocks:1;          /* Let the VFS do flock|fcntl */
-       unsigned int ar_localcaching:1;         /* Local caching */
        unsigned int ar_debug:1;                /* Oops on errors */
-       unsigned int ar_upgrade:1;              /* Upgrade ondisk format */
        unsigned int ar_posix_acl:1;            /* Enable posix acls */
        unsigned int ar_quota:2;                /* off/account/on */
        unsigned int ar_suiddir:1;              /* suiddir support */
@@ -497,7 +494,7 @@ struct gfs2_sb_host {
  */
 
 struct lm_lockstruct {
-       unsigned int ls_jid;
+       int ls_jid;
        unsigned int ls_first;
        unsigned int ls_first_done;
        unsigned int ls_nodir;
@@ -572,6 +569,7 @@ struct gfs2_sbd {
        struct list_head sd_rindex_mru_list;
        struct gfs2_rgrpd *sd_rindex_forward;
        unsigned int sd_rgrps;
+       unsigned int sd_max_rg_data;
 
        /* Journal index stuff */
 
index 08140f185a3792153e23bab24f03ac3107d04757..06370f8bd8cf4aafa95fd4df64e93ec8d657328d 100644 (file)
@@ -359,8 +359,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
         * to do that.
         */
        ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
-       ip->i_disksize = be64_to_cpu(str->di_size);
-       i_size_write(&ip->i_inode, ip->i_disksize);
+       i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
@@ -1055,7 +1054,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
        str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
        str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
        str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
-       str->di_size = cpu_to_be64(ip->i_disksize);
+       str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
        str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
        str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
        str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
@@ -1085,8 +1084,8 @@ void gfs2_dinode_print(const struct gfs2_inode *ip)
               (unsigned long long)ip->i_no_formal_ino);
        printk(KERN_INFO "  no_addr = %llu\n",
               (unsigned long long)ip->i_no_addr);
-       printk(KERN_INFO "  i_disksize = %llu\n",
-              (unsigned long long)ip->i_disksize);
+       printk(KERN_INFO "  i_size = %llu\n",
+              (unsigned long long)i_size_read(&ip->i_inode));
        printk(KERN_INFO "  blocks = %llu\n",
               (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
        printk(KERN_INFO "  i_goal = %llu\n",
index 300ada3f21de0cf5fc22677283343a76caf8200e..6720d7d5fbc6aac91083b399b95ba6c978922c67 100644 (file)
@@ -19,6 +19,8 @@ extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
 extern int gfs2_internal_read(struct gfs2_inode *ip,
                              struct file_ra_state *ra_state,
                              char *buf, loff_t *pos, unsigned size);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+                                  unsigned int from, unsigned int to);
 extern void gfs2_set_aops(struct inode *inode);
 
 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
@@ -80,6 +82,19 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
        dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr);
 }
 
+static inline int gfs2_check_internal_file_size(struct inode *inode,
+                                               u64 minsize, u64 maxsize)
+{
+       u64 size = i_size_read(inode);
+       if (size < minsize || size > maxsize)
+               goto err;
+       if (size & ((1 << inode->i_blkbits) - 1))
+               goto err;
+       return 0;
+err:
+       gfs2_consist_inode(GFS2_I(inode));
+       return -EIO;
+}
 
 extern void gfs2_set_iop(struct inode *inode);
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
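
The new gfs2_check_internal_file_size() helper centralises the size sanity checks previously open-coded against i_disksize: an out-of-bounds or block-misaligned size marks the inode inconsistent and returns -EIO in one step. Typical use, as adopted later in this diff for the quota-change file (bounds of 1 byte and 64MB):

        if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;
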
index 0e0470ed34c273a341aed5860191bfadf42b41e5..1c09425b45fd728ba52c1f5f49c3feac187640a2 100644 (file)
@@ -42,9 +42,9 @@ static void gdlm_ast(void *arg)
                ret |= LM_OUT_CANCELED;
                goto out;
        case -EAGAIN: /* Try lock fails */
+       case -EDEADLK: /* Deadlock detected */
                goto out;
-       case -EINVAL: /* Invalid */
-       case -ENOMEM: /* Out of memory */
+       case -ETIMEDOUT: /* Canceled due to timeout */
                ret |= LM_OUT_ERROR;
                goto out;
        case 0: /* Success */
index cde1248a62255ae9bdb03b4c2757a71e973fa089..ac750bd31a6f311e8a3dfc859a257c6f85b2f79d 100644 (file)
@@ -932,7 +932,7 @@ int gfs2_logd(void *data)
 
                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
-                                       TASK_UNINTERRUPTIBLE);
+                                       TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
index b1e9630eb46a8d0338fef57ffa15caf23ab1cf0f..d7eb1e209aa899561f7168a6d2964460ae73a89f 100644 (file)
@@ -24,6 +24,7 @@
 #include "glock.h"
 #include "quota.h"
 #include "recovery.h"
+#include "dir.h"
 
 static struct shrinker qd_shrinker = {
        .shrink = gfs2_shrink_qd_memory,
@@ -78,6 +79,9 @@ static int __init init_gfs2_fs(void)
 {
        int error;
 
+       gfs2_str2qstr(&gfs2_qdot, ".");
+       gfs2_str2qstr(&gfs2_qdotdot, "..");
+
        error = gfs2_sys_init();
        if (error)
                return error;
@@ -140,7 +144,7 @@ static int __init init_gfs2_fs(void)
 
        error = -ENOMEM;
        gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-                                         WQ_NON_REENTRANT | WQ_RESCUER, 0);
+                                         WQ_RESCUER | WQ_FREEZEABLE, 0);
        if (!gfs_recovery_wq)
                goto fail_wq;
 
index 4d4b1e8ac64c02ef64ffd216a71ec801e2fdb625..aeafc233dc897fdb102df29bf052fc040b61faef 100644 (file)
 #define DO 0
 #define UNDO 1
 
-static const u32 gfs2_old_fs_formats[] = {
-        0
-};
-
-static const u32 gfs2_old_multihost_formats[] = {
-        0
-};
-
 /**
  * gfs2_tune_init - Fill a gfs2_tune structure with default values
  * @gt: tune
@@ -135,8 +127,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
 {
-       unsigned int x;
-
        if (sb->sb_magic != GFS2_MAGIC ||
            sb->sb_type != GFS2_METATYPE_SB) {
                if (!silent)
@@ -150,55 +140,9 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int sile
            sb->sb_multihost_format == GFS2_FORMAT_MULTI)
                return 0;
 
-       if (sb->sb_fs_format != GFS2_FORMAT_FS) {
-               for (x = 0; gfs2_old_fs_formats[x]; x++)
-                       if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
-                               break;
+       fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 
-               if (!gfs2_old_fs_formats[x]) {
-                       printk(KERN_WARNING
-                              "GFS2: code version (%u, %u) is incompatible "
-                              "with ondisk format (%u, %u)\n",
-                              GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
-                              sb->sb_fs_format, sb->sb_multihost_format);
-                       printk(KERN_WARNING
-                              "GFS2: I don't know how to upgrade this FS\n");
-                       return -EINVAL;
-               }
-       }
-
-       if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
-               for (x = 0; gfs2_old_multihost_formats[x]; x++)
-                       if (gfs2_old_multihost_formats[x] ==
-                           sb->sb_multihost_format)
-                               break;
-
-               if (!gfs2_old_multihost_formats[x]) {
-                       printk(KERN_WARNING
-                              "GFS2: code version (%u, %u) is incompatible "
-                              "with ondisk format (%u, %u)\n",
-                              GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
-                              sb->sb_fs_format, sb->sb_multihost_format);
-                       printk(KERN_WARNING
-                              "GFS2: I don't know how to upgrade this FS\n");
-                       return -EINVAL;
-               }
-       }
-
-       if (!sdp->sd_args.ar_upgrade) {
-               printk(KERN_WARNING
-                      "GFS2: code version (%u, %u) is incompatible "
-                      "with ondisk format (%u, %u)\n",
-                      GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
-                      sb->sb_fs_format, sb->sb_multihost_format);
-               printk(KERN_INFO
-                      "GFS2: Use the \"upgrade\" mount option to upgrade "
-                      "the FS\n");
-               printk(KERN_INFO "GFS2: See the manual for more details\n");
-               return -EINVAL;
-       }
-
-       return 0;
+       return -EINVAL;
 }
 
 static void end_bio_io_page(struct bio *bio, int error)
@@ -586,7 +530,7 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
 
        prev_db = 0;
 
-       for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) {
+       for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) {
                bh.b_state = 0;
                bh.b_blocknr = 0;
                bh.b_size = 1 << ip->i_inode.i_blkbits;
@@ -1022,7 +966,6 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
        if (!strcmp("lock_nolock", proto)) {
                lm = &nolock_ops;
                sdp->sd_args.ar_localflocks = 1;
-               sdp->sd_args.ar_localcaching = 1;
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
        } else if (!strcmp("lock_dlm", proto)) {
                lm = &gfs2_dlm_ops;
@@ -1113,8 +1056,6 @@ static int gfs2_journalid_wait(void *word)
 
 static int wait_on_journal(struct gfs2_sbd *sdp)
 {
-       if (sdp->sd_args.ar_spectator)
-               return 0;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                return 0;
 
@@ -1217,6 +1158,20 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
        if (error)
                goto fail_sb;
 
+       /*
+        * If user space has failed to join the cluster or some similar
+        * failure has occurred, then the journal id will contain a
+        * negative (error) number. This will then be returned to the
+        * caller (of the mount syscall). We do this even for spectator
+        * mounts (which just write a jid of 0 to indicate "ok" even though
+        * the jid is unused in the spectator case)
+        */
+       if (sdp->sd_lockstruct.ls_jid < 0) {
+               error = sdp->sd_lockstruct.ls_jid;
+               sdp->sd_lockstruct.ls_jid = 0;
+               goto fail_sb;
+       }
+
        error = init_inodes(sdp, DO);
        if (error)
                goto fail_sb;
index 1009be2c9737687cdee8b48668a5f5d09752abeb..0534510200d5961887d3ee957d799e3b4669e76b 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/fiemap.h>
+#include <linux/swap.h>
+#include <linux/falloc.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -217,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
                        goto out_gunlock_q;
 
                error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-                                        al->al_rgd->rd_length +
+                                        gfs2_rg_blocks(al) +
                                         2 * RES_DINODE + RES_STATFS +
                                         RES_QUOTA, 0);
                if (error)
@@ -406,7 +408,6 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
 
        ip = ghs[1].gh_gl->gl_object;
 
-       ip->i_disksize = size;
        i_size_write(inode, size);
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -461,7 +462,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        ip = ghs[1].gh_gl->gl_object;
 
        ip->i_inode.i_nlink = 2;
-       ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+       i_size_write(inode, sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode));
        ip->i_diskflags |= GFS2_DIF_JDATA;
        ip->i_entries = 2;
 
@@ -470,18 +471,15 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (!gfs2_assert_withdraw(sdp, !error)) {
                struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
                struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
-               struct qstr str;
 
-               gfs2_str2qstr(&str, ".");
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
+               gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
                dent->de_inum = di->di_num; /* already GFS2 endian */
                dent->de_type = cpu_to_be16(DT_DIR);
                di->di_entries = cpu_to_be32(1);
 
-               gfs2_str2qstr(&str, "..");
                dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
-               gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
+               gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
 
                gfs2_inum_out(dip, dent);
                dent->de_type = cpu_to_be16(DT_DIR);
@@ -522,7 +520,6 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
                       struct gfs2_inode *ip)
 {
-       struct qstr dotname;
        int error;
 
        if (ip->i_entries != 2) {
@@ -539,13 +536,11 @@ static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
        if (error)
                return error;
 
-       gfs2_str2qstr(&dotname, ".");
-       error = gfs2_dir_del(ip, &dotname);
+       error = gfs2_dir_del(ip, &gfs2_qdot);
        if (error)
                return error;
 
-       gfs2_str2qstr(&dotname, "..");
-       error = gfs2_dir_del(ip, &dotname);
+       error = gfs2_dir_del(ip, &gfs2_qdotdot);
        if (error)
                return error;
 
@@ -694,11 +689,8 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
        struct inode *dir = &to->i_inode;
        struct super_block *sb = dir->i_sb;
        struct inode *tmp;
-       struct qstr dotdot;
        int error = 0;
 
-       gfs2_str2qstr(&dotdot, "..");
-
        igrab(dir);
 
        for (;;) {
@@ -711,7 +703,7 @@ static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
                        break;
                }
 
-               tmp = gfs2_lookupi(dir, &dotdot, 1);
+               tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1);
                if (IS_ERR(tmp)) {
                        error = PTR_ERR(tmp);
                        break;
@@ -744,7 +736,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
        struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
        struct gfs2_inode *nip = NULL;
        struct gfs2_sbd *sdp = GFS2_SB(odir);
-       struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+       struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }, ri_gh;
        struct gfs2_rgrpd *nrgd;
        unsigned int num_gh;
        int dir_rename = 0;
@@ -758,6 +750,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                        return 0;
        }
 
+       error = gfs2_rindex_hold(sdp, &ri_gh);
+       if (error)
+               return error;
 
        if (odip != ndip) {
                error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
@@ -887,12 +882,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 
                al->al_requested = sdp->sd_max_dirres;
 
-               error = gfs2_inplace_reserve(ndip);
+               error = gfs2_inplace_reserve_ri(ndip);
                if (error)
                        goto out_gunlock_q;
 
                error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-                                        al->al_rgd->rd_length +
+                                        gfs2_rg_blocks(al) +
                                         4 * RES_DINODE + 4 * RES_LEAF +
                                         RES_STATFS + RES_QUOTA + 4, 0);
                if (error)
@@ -920,9 +915,6 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
        }
 
        if (dir_rename) {
-               struct qstr name;
-               gfs2_str2qstr(&name, "..");
-
                error = gfs2_change_nlink(ndip, +1);
                if (error)
                        goto out_end_trans;
@@ -930,7 +922,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                if (error)
                        goto out_end_trans;
 
-               error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR);
+               error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
                if (error)
                        goto out_end_trans;
        } else {
@@ -972,6 +964,7 @@ out_gunlock_r:
        if (r_gh.gh_gl)
                gfs2_glock_dq_uninit(&r_gh);
 out:
+       gfs2_glock_dq_uninit(&ri_gh);
        return error;
 }
 
@@ -990,7 +983,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
        struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
        struct gfs2_holder i_gh;
        struct buffer_head *dibh;
-       unsigned int x;
+       unsigned int x, size;
        char *buf;
        int error;
 
@@ -1002,7 +995,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
                return NULL;
        }
 
-       if (!ip->i_disksize) {
+       size = (unsigned int)i_size_read(&ip->i_inode);
+       if (size == 0) {
                gfs2_consist_inode(ip);
                buf = ERR_PTR(-EIO);
                goto out;
@@ -1014,7 +1008,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
                goto out;
        }
 
-       x = ip->i_disksize + 1;
+       x = size + 1;
        buf = kmalloc(x, GFP_NOFS);
        if (!buf)
                buf = ERR_PTR(-ENOMEM);
@@ -1071,30 +1065,6 @@ int gfs2_permission(struct inode *inode, int mask)
        return error;
 }
 
-/*
- * XXX(truncate): the truncate_setsize calls should be moved to the end.
- */
-static int setattr_size(struct inode *inode, struct iattr *attr)
-{
-       struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_sbd *sdp = GFS2_SB(inode);
-       int error;
-
-       if (attr->ia_size != ip->i_disksize) {
-               error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
-               if (error)
-                       return error;
-               truncate_setsize(inode, attr->ia_size);
-               gfs2_trans_end(sdp);
-       }
-
-       error = gfs2_truncatei(ip, attr->ia_size);
-       if (error && (inode->i_size != ip->i_disksize))
-               i_size_write(inode, ip->i_disksize);
-
-       return error;
-}
-
 static int setattr_chown(struct inode *inode, struct iattr *attr)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
@@ -1195,7 +1165,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
                goto out;
 
        if (attr->ia_valid & ATTR_SIZE)
-               error = setattr_size(inode, attr);
+               error = gfs2_setattr_size(inode, attr->ia_size);
        else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
                error = setattr_chown(inode, attr);
        else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode))
@@ -1301,6 +1271,257 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
        return ret;
 }
 
+static void empty_write_end(struct page *page, unsigned from,
+                          unsigned to)
+{
+       struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+
+       page_zero_new_buffers(page, from, to);
+       flush_dcache_page(page);
+       mark_page_accessed(page);
+
+       if (!gfs2_is_writeback(ip))
+               gfs2_page_add_databufs(ip, page, from, to);
+
+       block_commit_write(page, from, to);
+}
+
+
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+{
+       unsigned start, end, next;
+       struct buffer_head *bh, *head;
+       int error;
+
+       if (!page_has_buffers(page)) {
+               error = block_prepare_write(page, from, to, gfs2_block_map);
+               if (unlikely(error))
+                       return error;
+
+               empty_write_end(page, from, to);
+               return 0;
+       }
+
+       bh = head = page_buffers(page);
+       next = end = 0;
+       while (next < from) {
+               next += bh->b_size;
+               bh = bh->b_this_page;
+       }
+       start = next;
+       do {
+               next += bh->b_size;
+               if (buffer_mapped(bh)) {
+                       if (end) {
+                               error = block_prepare_write(page, start, end,
+                                                           gfs2_block_map);
+                               if (unlikely(error))
+                                       return error;
+                               empty_write_end(page, start, end);
+                               end = 0;
+                       }
+                       start = next;
+               }
+               else
+                       end = next;
+               bh = bh->b_this_page;
+       } while (next < to);
+
+       if (end) {
+               error = block_prepare_write(page, start, end, gfs2_block_map);
+               if (unlikely(error))
+                       return error;
+               empty_write_end(page, start, end);
+       }
+
+       return 0;
+}
+
+static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
+                          int mode)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct buffer_head *dibh;
+       int error;
+       u64 start = offset >> PAGE_CACHE_SHIFT;
+       unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
+       u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+       pgoff_t curr;
+       struct page *page;
+       unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
+       unsigned int from, to;
+
+       if (!end_offset)
+               end_offset = PAGE_CACHE_SIZE;
+
+       error = gfs2_meta_inode_buffer(ip, &dibh);
+       if (unlikely(error))
+               goto out;
+
+       gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+       if (gfs2_is_stuffed(ip)) {
+               error = gfs2_unstuff_dinode(ip, NULL);
+               if (unlikely(error))
+                       goto out;
+       }
+
+       curr = start;
+       offset = start << PAGE_CACHE_SHIFT;
+       from = start_offset;
+       to = PAGE_CACHE_SIZE;
+       while (curr <= end) {
+               page = grab_cache_page_write_begin(inode->i_mapping, curr,
+                                                  AOP_FLAG_NOFS);
+               if (unlikely(!page)) {
+                       error = -ENOMEM;
+                       goto out;
+               }
+
+               if (curr == end)
+                       to = end_offset;
+               error = write_empty_blocks(page, from, to);
+               if (!error && offset + to > inode->i_size &&
+                   !(mode & FALLOC_FL_KEEP_SIZE)) {
+                       i_size_write(inode, offset + to);
+               }
+               unlock_page(page);
+               page_cache_release(page);
+               if (error)
+                       goto out;
+               curr++;
+               offset += PAGE_CACHE_SIZE;
+               from = 0;
+       }
+
+       gfs2_dinode_out(ip, dibh->b_data);
+       mark_inode_dirty(inode);
+
+       brelse(dibh);
+
+out:
+       return error;
+}
+
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
+                           unsigned int *data_blocks, unsigned int *ind_blocks)
+{
+       const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+       unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+       unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
+
+       for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+               tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+               max_data -= tmp;
+       }
+       /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
+          so it might end up with fewer data blocks */
+       if (max_data <= *data_blocks)
+               return;
+       *data_blocks = max_data;
+       *ind_blocks = max_blocks - max_data;
+       *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
+       if (*len > max) {
+               *len = max;
+               gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+       }
+}
+
+static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+                          loff_t len)
+{
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+       loff_t bytes, max_bytes;
+       struct gfs2_alloc *al;
+       int error;
+       loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
+       next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
+
+       offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
+                sdp->sd_sb.sb_bsize_shift;
+
+       len = next - offset;
+       bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
+       if (!bytes)
+               bytes = UINT_MAX;
+
+       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+       error = gfs2_glock_nq(&ip->i_gh);
+       if (unlikely(error))
+               goto out_uninit;
+
+       if (!gfs2_write_alloc_required(ip, offset, len))
+               goto out_unlock;
+
+       while (len > 0) {
+               if (len < bytes)
+                       bytes = len;
+               al = gfs2_alloc_get(ip);
+               if (!al) {
+                       error = -ENOMEM;
+                       goto out_unlock;
+               }
+
+               error = gfs2_quota_lock_check(ip);
+               if (error)
+                       goto out_alloc_put;
+
+retry:
+               gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+
+               al->al_requested = data_blocks + ind_blocks;
+               error = gfs2_inplace_reserve(ip);
+               if (error) {
+                       if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
+                               bytes >>= 1;
+                               goto retry;
+                       }
+                       goto out_qunlock;
+               }
+               max_bytes = bytes;
+               calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
+               al->al_requested = data_blocks + ind_blocks;
+
+               rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
+                         RES_RG_HDR + gfs2_rg_blocks(al);
+               if (gfs2_is_jdata(ip))
+                       rblocks += data_blocks ? data_blocks : 1;
+
+               error = gfs2_trans_begin(sdp, rblocks,
+                                        PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+               if (error)
+                       goto out_trans_fail;
+
+               error = fallocate_chunk(inode, offset, max_bytes, mode);
+               gfs2_trans_end(sdp);
+
+               if (error)
+                       goto out_trans_fail;
+
+               len -= max_bytes;
+               offset += max_bytes;
+               gfs2_inplace_release(ip);
+               gfs2_quota_unlock(ip);
+               gfs2_alloc_put(ip);
+       }
+       goto out_unlock;
+
+out_trans_fail:
+       gfs2_inplace_release(ip);
+out_qunlock:
+       gfs2_quota_unlock(ip);
+out_alloc_put:
+       gfs2_alloc_put(ip);
+out_unlock:
+       gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+       gfs2_holder_uninit(&ip->i_gh);
+       return error;
+}
+
+
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                       u64 start, u64 len)
 {
@@ -1351,6 +1572,7 @@ const struct inode_operations gfs2_file_iops = {
        .getxattr = gfs2_getxattr,
        .listxattr = gfs2_listxattr,
        .removexattr = gfs2_removexattr,
+       .fallocate = gfs2_fallocate,
        .fiemap = gfs2_fiemap,
 };
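
The shift arithmetic at the top of gfs2_fallocate() rounds the requested range outward to filesystem block boundaries before anything is reserved. An equivalent restatement using the kernel's ALIGN() macro, assuming only that sb_bsize is the block size and sb_bsize_shift its log2:

        loff_t end = offset + len;                      /* one past the last requested byte */

        offset &= ~((loff_t)sdp->sd_sb.sb_bsize - 1);   /* round the start down */
        len = ALIGN(end, sdp->sd_sb.sb_bsize) - offset; /* round the length up */
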
 
index 1bc6b5695e6dfb34870810b87bd09c819f25c93f..58a9b9998b42d0d9603c7a49ffc746ab4a22ef87 100644 (file)
@@ -735,10 +735,8 @@ get_a_page:
                goto out;
 
        size = loc + sizeof(struct gfs2_quota);
-       if (size > inode->i_size) {
-               ip->i_disksize = size;
+       if (size > inode->i_size)
                i_size_write(inode, size);
-       }
        inode->i_mtime = inode->i_atime = CURRENT_TIME;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
@@ -817,7 +815,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
                goto out_alloc;
 
        if (nalloc)
-               blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
+               blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
 
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
@@ -1190,18 +1188,17 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
 int gfs2_quota_init(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
-       unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
+       u64 size = i_size_read(sdp->sd_qc_inode);
+       unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;
 
-       if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
-           ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
-               gfs2_consist_inode(ip);
+       if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;
-       }
+
        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
 
@@ -1589,6 +1586,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_alloc;
+               blocks += gfs2_rg_blocks(al);
        }
 
        error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
index f7f89a94a5a4598a4b532016a0c16846a62cef2b..f2a02edcac8f43e9de1dd22fd4c72ac7347f39a5 100644 (file)
@@ -455,11 +455,13 @@ void gfs2_recover_func(struct work_struct *work)
        int ro = 0;
        unsigned int pass;
        int error;
+       int jlocked = 0;
 
-       if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+       if (sdp->sd_args.ar_spectator ||
+           (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
                fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
                        jd->jd_jid);
-
+               jlocked = 1;
                /* Acquire the journal lock so we can do recovery */
 
                error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
@@ -554,13 +556,12 @@ void gfs2_recover_func(struct work_struct *work)
                        jd->jd_jid, t);
        }
 
-       if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
-               gfs2_glock_dq_uninit(&ji_gh);
-
        gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
 
-       if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
+       if (jlocked) {
+               gfs2_glock_dq_uninit(&ji_gh);
                gfs2_glock_dq_uninit(&j_gh);
+       }
 
        fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
        goto done;
@@ -568,7 +569,7 @@ void gfs2_recover_func(struct work_struct *work)
 fail_gunlock_tr:
        gfs2_glock_dq_uninit(&t_gh);
 fail_gunlock_ji:
-       if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+       if (jlocked) {
                gfs2_glock_dq_uninit(&ji_gh);
 fail_gunlock_j:
                gfs2_glock_dq_uninit(&j_gh);
index 171a744f8e45d172f4e43eb793ae4e08dba8ba23..fb67f593f40856b213f03887642c44a3e4c7ccc5 100644 (file)
@@ -500,7 +500,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
        for (rgrps = 0;; rgrps++) {
                loff_t pos = rgrps * sizeof(struct gfs2_rindex);
 
-               if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
+               if (pos + sizeof(struct gfs2_rindex) >= i_size_read(inode))
                        break;
                error = gfs2_internal_read(ip, &ra_state, buf, &pos,
                                           sizeof(struct gfs2_rindex));
@@ -588,7 +588,9 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct inode *inode = &ip->i_inode;
        struct file_ra_state ra_state;
-       u64 rgrp_count = ip->i_disksize;
+       u64 rgrp_count = i_size_read(inode);
+       struct gfs2_rgrpd *rgd;
+       unsigned int max_data = 0;
        int error;
 
        do_div(rgrp_count, sizeof(struct gfs2_rindex));
@@ -603,6 +605,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
                }
        }
 
+       list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
+               if (rgd->rd_data > max_data)
+                       max_data = rgd->rd_data;
+       sdp->sd_max_rg_data = max_data;
        sdp->sd_rindex_uptodate = 1;
        return 0;
 }
@@ -622,13 +628,15 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct inode *inode = &ip->i_inode;
        struct file_ra_state ra_state;
+       struct gfs2_rgrpd *rgd;
+       unsigned int max_data = 0;
        int error;
 
        file_ra_state_init(&ra_state, inode->i_mapping);
        for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
                /* Ignore partials */
                if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
-                   ip->i_disksize)
+                   i_size_read(inode))
                        break;
                error = read_rindex_entry(ip, &ra_state);
                if (error) {
@@ -636,6 +644,10 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
                        return error;
                }
        }
+       list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
+               if (rgd->rd_data > max_data)
+                       max_data = rgd->rd_data;
+       sdp->sd_max_rg_data = max_data;
 
        sdp->sd_rindex_uptodate = 1;
        return 0;
@@ -1188,7 +1200,8 @@ out:
  * Returns: errno
  */
 
-int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
+int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+                          char *file, unsigned int line)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
@@ -1199,12 +1212,15 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
                return -EINVAL;
 
 try_again:
-       /* We need to hold the rindex unless the inode we're using is
-          the rindex itself, in which case it's already held. */
-       if (ip != GFS2_I(sdp->sd_rindex))
-               error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
-       else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
-               error = gfs2_ri_update_special(ip);
+       if (hold_rindex) {
+               /* We need to hold the rindex unless the inode we're using is
+                  the rindex itself, in which case it's already held. */
+               if (ip != GFS2_I(sdp->sd_rindex))
+                       error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
+               else if (!sdp->sd_rgrps) /* We may not have the rindex read
+                                           in, so: */
+                       error = gfs2_ri_update_special(ip);
+       }
 
        if (error)
                return error;
@@ -1215,7 +1231,7 @@ try_again:
           try to free it, and try the allocation again. */
        error = get_local_rgrp(ip, &unlinked, &last_unlinked);
        if (error) {
-               if (ip != GFS2_I(sdp->sd_rindex))
+               if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
                        gfs2_glock_dq_uninit(&al->al_ri_gh);
                if (error != -EAGAIN)
                        return error;
@@ -1257,7 +1273,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
        al->al_rgd = NULL;
        if (al->al_rgd_gh.gh_gl)
                gfs2_glock_dq_uninit(&al->al_rgd_gh);
-       if (ip != GFS2_I(sdp->sd_rindex))
+       if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl)
                gfs2_glock_dq_uninit(&al->al_ri_gh);
 }
 
@@ -1496,11 +1512,19 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *dibh;
        struct gfs2_alloc *al = ip->i_alloc;
-       struct gfs2_rgrpd *rgd = al->al_rgd;
+       struct gfs2_rgrpd *rgd;
        u32 goal, blk;
        u64 block;
        int error;
 
+       /* Only happens if there is a bug in gfs2, return something distinctive
+        * to ensure that it is noticed.
+        */
+       if (al == NULL)
+               return -ECANCELED;
+
+       rgd = al->al_rgd;
+
        if (rgrp_contains_block(rgd, ip->i_goal))
                goal = ip->i_goal - rgd->rd_data0;
        else
index f07119d89557855fc9a8673b32e9119cad921570..0e35c0466f9a6c5979a3fe8c339def323bc37fad 100644 (file)
@@ -39,10 +39,12 @@ static inline void gfs2_alloc_put(struct gfs2_inode *ip)
        ip->i_alloc = NULL;
 }
 
-extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file,
-                                 unsigned int line);
+extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+                                 char *file, unsigned int line);
 #define gfs2_inplace_reserve(ip) \
-gfs2_inplace_reserve_i((ip), __FILE__, __LINE__)
+       gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__)
+#define gfs2_inplace_reserve_ri(ip) \
+       gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__)
 
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
index 77cb9f830ee47eb51520bd8581ebc700b426455e..047d1176096c79d6f0ce50227b8c098755fb0150 100644 (file)
@@ -85,6 +85,7 @@ static const match_table_t tokens = {
        {Opt_locktable, "locktable=%s"},
        {Opt_hostdata, "hostdata=%s"},
        {Opt_spectator, "spectator"},
+       {Opt_spectator, "norecovery"},
        {Opt_ignore_local_fs, "ignore_local_fs"},
        {Opt_localflocks, "localflocks"},
        {Opt_localcaching, "localcaching"},
@@ -159,13 +160,13 @@ int gfs2_mount_args(struct gfs2_args *args, char *options)
                        args->ar_spectator = 1;
                        break;
                case Opt_ignore_local_fs:
-                       args->ar_ignore_local_fs = 1;
+                       /* Retained for backwards compat only */
                        break;
                case Opt_localflocks:
                        args->ar_localflocks = 1;
                        break;
                case Opt_localcaching:
-                       args->ar_localcaching = 1;
+                       /* Retained for backwards compat only */
                        break;
                case Opt_debug:
                        if (args->ar_errors == GFS2_ERRORS_PANIC) {
@@ -179,7 +180,7 @@ int gfs2_mount_args(struct gfs2_args *args, char *options)
                        args->ar_debug = 0;
                        break;
                case Opt_upgrade:
-                       args->ar_upgrade = 1;
+                       /* Retained for backwards compat only */
                        break;
                case Opt_acl:
                        args->ar_posix_acl = 1;
@@ -342,15 +343,14 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 {
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+       u64 size = i_size_read(jd->jd_inode);
 
-       if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
-           (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
-               gfs2_consist_inode(ip);
+       if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30))
                return -EIO;
-       }
-       jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
 
-       if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) {
+       jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
+
+       if (gfs2_write_alloc_required(ip, 0, size)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
@@ -1129,9 +1129,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
        /* Some flags must not be changed */
        if (args_neq(&args, &sdp->sd_args, spectator) ||
-           args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
            args_neq(&args, &sdp->sd_args, localflocks) ||
-           args_neq(&args, &sdp->sd_args, localcaching) ||
            args_neq(&args, &sdp->sd_args, meta))
                return -EINVAL;
 
@@ -1234,16 +1232,10 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
                seq_printf(s, ",hostdata=%s", args->ar_hostdata);
        if (args->ar_spectator)
                seq_printf(s, ",spectator");
-       if (args->ar_ignore_local_fs)
-               seq_printf(s, ",ignore_local_fs");
        if (args->ar_localflocks)
                seq_printf(s, ",localflocks");
-       if (args->ar_localcaching)
-               seq_printf(s, ",localcaching");
        if (args->ar_debug)
                seq_printf(s, ",debug");
-       if (args->ar_upgrade)
-               seq_printf(s, ",upgrade");
        if (args->ar_posix_acl)
                seq_printf(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
index ccacffd2faaa6d65d1f116b9b2788ee732786f3f..748ccb557c18fc504c28951f13d634fe9f05fa0a 100644 (file)
@@ -230,7 +230,10 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
 
        if (gltype > LM_TYPE_JOURNAL)
                return -EINVAL;
-       glops = gfs2_glops_list[gltype];
+       if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK)
+               glops = &gfs2_trans_glops;
+       else
+               glops = gfs2_glops_list[gltype];
        if (glops == NULL)
                return -EINVAL;
        if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
@@ -399,31 +402,32 @@ static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
 
 static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
 {
-       return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid);
+       return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
 }
 
 static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 {
-        unsigned jid;
+        int jid;
        int rv;
 
-       rv = sscanf(buf, "%u", &jid);
+       rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
 
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
-       if (sdp->sd_args.ar_spectator)
-               goto out;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        rv = -EBUSY;
-       if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+       if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
+       rv = 0;
+       if (sdp->sd_args.ar_spectator && jid > 0)
+               rv = jid = -EINVAL;
        sdp->sd_lockstruct.ls_jid = jid;
+       clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
-       rv = 0;
 out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
@@ -617,7 +621,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
-               add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
+               add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
        if (gfs2_uuid_valid(uuid))
                add_uevent_var(env, "UUID=%pUB", uuid);
        return 0;
index 148d55c14171dee39435c83d56c906c09dcd0cf8..cedb0bb96d968414d14e6eca3e5b934b68ee606e 100644 (file)
@@ -39,7 +39,8 @@
        {(1UL << GLF_INVALIDATE_IN_PROGRESS),   "i" },          \
        {(1UL << GLF_REPLY_PENDING),            "r" },          \
        {(1UL << GLF_INITIAL),                  "I" },          \
-       {(1UL << GLF_FROZEN),                   "F" })
+       {(1UL << GLF_FROZEN),                   "F" },          \
+       {(1UL << GLF_QUEUED),                   "q" })
 
 #ifndef NUMPTY
 #define NUMPTY
index edf9d4bd908ee2726991f12ac869bfeb60971381..fb56b783e028c8ce61b0b663e67e2df14e408b07 100644 (file)
@@ -20,11 +20,20 @@ struct gfs2_glock;
 #define RES_JDATA      1
 #define RES_DATA       1
 #define RES_LEAF       1
+#define RES_RG_HDR     1
 #define RES_RG_BIT     2
 #define RES_EATTR      1
 #define RES_STATFS     1
 #define RES_QUOTA      2
 
+/* reserve either the number of blocks to be allocated plus the rg header
+ * block, or all of the blocks in the rg, whichever is smaller */
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al)
+{
+       return (al->al_requested < al->al_rgd->rd_length) ?
+              al->al_requested + 1 : al->al_rgd->rd_length;
+}
+
 int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
                     unsigned int revokes);
 
index 776af6eb4bcb1b193ecf5ef858ac09cb0535b95f..30b58f07c8a6b219fc964efe101ce5f861397885 100644 (file)
@@ -734,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                goto out_gunlock_q;
 
        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
-                                blks + al->al_rgd->rd_length +
+                                blks + gfs2_rg_blocks(al) +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;
index 4129cdb3f0d8fbf80c4b161d877d9e922666f9ea..571abe97b42a2919caf381238eec6e58c57d0eaf 100644 (file)
@@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
        fd->search_key = ptr;
        fd->key = ptr + tree->max_key_len + 2;
        dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
-       down(&tree->tree_lock);
+       mutex_lock(&tree->tree_lock);
        return 0;
 }
 
@@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
        hfs_bnode_put(fd->bnode);
        kfree(fd->search_key);
        dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
-       up(&fd->tree->tree_lock);
+       mutex_unlock(&fd->tree->tree_lock);
        fd->tree = NULL;
 }
 
index 38a0a9917d7f3a67b0eaad49a75246430eef7d76..3ebc437736febb4e0ef37a3612ea6501c7eac59a 100644 (file)
@@ -27,7 +27,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
        if (!tree)
                return NULL;
 
-       init_MUTEX(&tree->tree_lock);
+       mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        /* Set the correct compare function */
        tree->sb = sb;
index cc51905ac21de4f110929dedd9160152d76a0f26..2a1d712f85dccfb5bb2983615e84278a1c4923e7 100644 (file)
@@ -33,7 +33,7 @@ struct hfs_btree {
        unsigned int depth;
 
        //unsigned int map1_size, map_size;
-       struct semaphore tree_lock;
+       struct mutex tree_lock;
 
        unsigned int pages_per_bnode;
        spinlock_t hash_lock;
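
The HFS b-tree lock conversion here, mirrored for HFS+ below, follows the standard semaphore-to-mutex recipe: a binary semaphore that only ever provided mutual exclusion becomes a struct mutex. The mapping, schematically:

        struct mutex tree_lock;                 /* was: struct semaphore tree_lock; */

        mutex_init(&tree->tree_lock);           /* was: init_MUTEX(&tree->tree_lock); */
        mutex_lock(&tree->tree_lock);           /* was: down(&tree->tree_lock);      */
        mutex_unlock(&tree->tree_lock);         /* was: up(&tree->tree_lock);        */
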
index 5007a41f1be9d345ff11dd7420285ee6a79c08e2..d182438c7ae4bea8b4ae108ad910ee795417410a 100644 (file)
@@ -23,7 +23,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
        fd->search_key = ptr;
        fd->key = ptr + tree->max_key_len + 2;
        dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
-       down(&tree->tree_lock);
+       mutex_lock(&tree->tree_lock);
        return 0;
 }
 
@@ -32,7 +32,7 @@ void hfs_find_exit(struct hfs_find_data *fd)
        hfs_bnode_put(fd->bnode);
        kfree(fd->search_key);
        dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
-       up(&fd->tree->tree_lock);
+       mutex_unlock(&fd->tree->tree_lock);
        fd->tree = NULL;
 }
 
@@ -52,6 +52,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
                rec = (e + b) / 2;
                len = hfs_brec_lenoff(bnode, rec, &off);
                keylen = hfs_brec_keylen(bnode, rec);
+               if (keylen == 0) {
+                       res = -EINVAL;
+                       goto fail;
+               }
                hfs_bnode_read(bnode, fd->key, off, keylen);
                cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
                if (!cmpval) {
@@ -67,6 +71,10 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
        if (rec != e && e >= 0) {
                len = hfs_brec_lenoff(bnode, e, &off);
                keylen = hfs_brec_keylen(bnode, e);
+               if (keylen == 0) {
+                       res = -EINVAL;
+                       goto fail;
+               }
                hfs_bnode_read(bnode, fd->key, off, keylen);
        }
 done:
@@ -75,6 +83,7 @@ done:
        fd->keylength = keylen;
        fd->entryoffset = off + keylen;
        fd->entrylength = len - keylen;
+fail:
        return res;
 }
 
@@ -198,6 +207,10 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
 
        len = hfs_brec_lenoff(bnode, fd->record, &off);
        keylen = hfs_brec_keylen(bnode, fd->record);
+       if (keylen == 0) {
+               res = -EINVAL;
+               goto out;
+       }
        fd->keyoffset = off;
        fd->keylength = keylen;
        fd->entryoffset = off + keylen;
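
These hunks make every hfs_brec_keylen() caller treat a zero return as on-disk corruption; the brec.c hunk below is what starts returning 0 for oversized keys instead of trusting them. The caller-side pattern added three times above:

        keylen = hfs_brec_keylen(bnode, fd->record);
        if (keylen == 0) {
                res = -EINVAL;          /* corrupt b-tree node */
                goto fail;
        }
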
index ea30afc2a03c774221cc17341c446d30a0cb341b..ad57f5991eb1f14e3ac24dafa207e440d6d74be3 100644 (file)
@@ -17,6 +17,7 @@
 
 int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct page *page;
        struct address_space *mapping;
        __be32 *pptr, *curr, *end;
@@ -29,8 +30,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
                return size;
 
        dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
-       mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
-       mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+       mutex_lock(&sbi->alloc_mutex);
+       mapping = sbi->alloc_file->i_mapping;
        page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
        if (IS_ERR(page)) {
                start = size;
@@ -150,16 +151,17 @@ done:
        set_page_dirty(page);
        kunmap(page);
        *max = offset + (curr - pptr) * 32 + i - start;
-       HFSPLUS_SB(sb).free_blocks -= *max;
+       sbi->free_blocks -= *max;
        sb->s_dirt = 1;
        dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
 out:
-       mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+       mutex_unlock(&sbi->alloc_mutex);
        return start;
 }
 
 int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct page *page;
        struct address_space *mapping;
        __be32 *pptr, *curr, *end;
@@ -172,11 +174,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 
        dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
        /* are all of the bits in range? */
-       if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
+       if ((offset + count) > sbi->total_blocks)
                return -2;
 
-       mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
-       mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+       mutex_lock(&sbi->alloc_mutex);
+       mapping = sbi->alloc_file->i_mapping;
        pnr = offset / PAGE_CACHE_BITS;
        page = read_mapping_page(mapping, pnr, NULL);
        pptr = kmap(page);
@@ -224,9 +226,9 @@ done:
 out:
        set_page_dirty(page);
        kunmap(page);
-       HFSPLUS_SB(sb).free_blocks += len;
+       sbi->free_blocks += len;
        sb->s_dirt = 1;
-       mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+       mutex_unlock(&sbi->alloc_mutex);
 
        return 0;
 }
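
Two cleanups meet in this file: HFSPLUS_SB() now yields a pointer that each function caches once as sbi, and bitmap updates serialise on a dedicated sbi->alloc_mutex rather than repurposing the allocation file's i_mutex. The resulting locking shape:

        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

        mutex_lock(&sbi->alloc_mutex);
        /* ... map, modify and dirty the bitmap pages ... */
        mutex_unlock(&sbi->alloc_mutex);
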
index c88e5d72a402ae2d29a8905cdccf7b59ccd4337d..2f39d05443e1a374b197359f70d20337562aa9e8 100644 (file)
@@ -42,10 +42,13 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
                recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
                if (!recoff)
                        return 0;
-               if (node->tree->attributes & HFS_TREE_BIGKEYS)
-                       retval = hfs_bnode_read_u16(node, recoff) + 2;
-               else
-                       retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
+
+               retval = hfs_bnode_read_u16(node, recoff) + 2;
+               if (retval > node->tree->max_key_len + 2) {
+                       printk(KERN_ERR "hfs: keylen %d too large\n",
+                               retval);
+                       retval = 0;
+               }
        }
        return retval;
 }
@@ -216,7 +219,7 @@ skip:
 static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 {
        struct hfs_btree *tree;
-       struct hfs_bnode *node, *new_node;
+       struct hfs_bnode *node, *new_node, *next_node;
        struct hfs_bnode_desc node_desc;
        int num_recs, new_rec_off, new_off, old_rec_off;
        int data_start, data_end, size;
@@ -235,6 +238,17 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
        new_node->type = node->type;
        new_node->height = node->height;
 
+       if (node->next)
+               next_node = hfs_bnode_find(tree, node->next);
+       else
+               next_node = NULL;
+
+       if (IS_ERR(next_node)) {
+               hfs_bnode_put(node);
+               hfs_bnode_put(new_node);
+               return next_node;
+       }
+
        size = tree->node_size / 2 - node->num_recs * 2 - 14;
        old_rec_off = tree->node_size - 4;
        num_recs = 1;
@@ -248,6 +262,8 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
                /* panic? */
                hfs_bnode_put(node);
                hfs_bnode_put(new_node);
+               if (next_node)
+                       hfs_bnode_put(next_node);
                return ERR_PTR(-ENOSPC);
        }
 
@@ -302,8 +318,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
        hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
 
        /* update next bnode header */
-       if (new_node->next) {
-               struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+       if (next_node) {
                next_node->prev = new_node->this;
                hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
                node_desc.prev = cpu_to_be32(next_node->prev);
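
The split path above now looks up the next bnode before any records move, and every failure exit drops the node references it took. The lookup reports failure through the kernel's ERR_PTR convention; a self-contained sketch of how an errno is folded into a pointer value (simplified from include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        /* errors live in the top 4095 values of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_node(int exists)
    {
        static int node = 42;

        return exists ? (void *)&node : ERR_PTR(-EIO);
    }

    int main(void)
    {
        void *n = find_node(0);

        if (IS_ERR(n))
            printf("lookup failed: %ld\n", PTR_ERR(n));
        return 0;
    }
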
index e49fcee1e293f725786e84ea6126e408e5eda7c8..22e4d4e329999c3ba9848036a3639bc194598f74 100644 (file)
@@ -30,7 +30,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        if (!tree)
                return NULL;
 
-       init_MUTEX(&tree->tree_lock);
+       mutex_init(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
@@ -39,10 +39,16 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
                goto free_tree;
        tree->inode = inode;
 
+       if (!HFSPLUS_I(tree->inode)->first_blocks) {
+               printk(KERN_ERR
+                      "hfs: invalid btree extent records (0 size).\n");
+               goto free_inode;
+       }
+
        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
-               goto free_tree;
+               goto free_inode;
 
        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
@@ -57,27 +63,56 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);
 
-       /* Set the correct compare function */
-       if (id == HFSPLUS_EXT_CNID) {
+       /* Verify the tree and set the correct compare function */
+       switch (id) {
+       case HFSPLUS_EXT_CNID:
+               if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
+                       printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
+                               tree->max_key_len);
+                       goto fail_page;
+               }
+               if (tree->attributes & HFS_TREE_VARIDXKEYS) {
+                       printk(KERN_ERR "hfs: invalid extent btree flag\n");
+                       goto fail_page;
+               }
+
                tree->keycmp = hfsplus_ext_cmp_key;
-       } else if (id == HFSPLUS_CAT_CNID) {
-               if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
+               break;
+       case HFSPLUS_CAT_CNID:
+               if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
+                       printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
+                               tree->max_key_len);
+                       goto fail_page;
+               }
+               if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+                       printk(KERN_ERR "hfs: invalid catalog btree flag\n");
+                       goto fail_page;
+               }
+
+               if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else {
                        tree->keycmp = hfsplus_cat_case_cmp_key;
-                       HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
+                       set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
                }
-       } else {
+               break;
+       default:
                printk(KERN_ERR "hfs: unknown B*Tree requested\n");
                goto fail_page;
        }
 
+       if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
+               printk(KERN_ERR "hfs: invalid btree flag\n");
+               goto fail_page;
+       }
+
        size = tree->node_size;
        if (!is_power_of_2(size))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
+
        tree->node_size_shift = ffs(size) - 1;
 
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
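
This hunk also switches the superblock flags to bit numbers: the HFSPLUS_SB_* constants (redefined later in the hfsplus_fs.h diff) are now indices passed to test_bit()/set_bit() instead of masks OR-ed into the field, which makes the updates atomic. A stand-alone sketch of the bit-number style, using userspace stand-ins rather than the kernel's atomic bitops:

    #include <stdio.h>

    #define SB_HFSX       3
    #define SB_CASEFOLD   4

    static int test_bit(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1UL;
    }

    static void set_bit(int nr, unsigned long *addr)
    {
        *addr |= 1UL << nr;     /* the kernel version is atomic */
    }

    int main(void)
    {
        unsigned long flags = 0;

        set_bit(SB_CASEFOLD, &flags);
        printf("hfsx=%d casefold=%d\n",
               test_bit(SB_HFSX, &flags), test_bit(SB_CASEFOLD, &flags));
        return 0;
    }
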
@@ -87,10 +122,11 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        return tree;
 
  fail_page:
-       tree->inode->i_mapping->a_ops = &hfsplus_aops;
        page_cache_release(page);
- free_tree:
+ free_inode:
+       tree->inode->i_mapping->a_ops = &hfsplus_aops;
        iput(tree->inode);
+ free_tree:
        kfree(tree);
        return NULL;
 }
@@ -192,17 +228,18 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
+               struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
                u32 count;
                int res;
 
                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
-               HFSPLUS_I(inode).phys_size = inode->i_size =
-                               (loff_t)HFSPLUS_I(inode).alloc_blocks <<
-                               HFSPLUS_SB(tree->sb).alloc_blksz_shift;
-               HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
-                                            HFSPLUS_SB(tree->sb).fs_shift;
+               hip->phys_size = inode->i_size =
+                       (loff_t)hip->alloc_blocks <<
+                               HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
+               hip->fs_blocks =
+                       hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
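
hfs_btree_open() now refuses to use a tree whose on-disk header looks wrong: the per-tree max_key_len, the VARIDXKEYS/BIGKEYS flags, and a power-of-two node size are all checked before use, with failures unwinding through the relabeled fail_page/free_inode/free_tree exits. A small sketch of this kind of header sanity check, with illustrative constants:

    #include <stdio.h>

    struct btree_header { unsigned max_key_len; unsigned node_size; };

    static int is_power_of_2(unsigned n)
    {
        return n && !(n & (n - 1));
    }

    static int header_valid(const struct btree_header *h,
                            unsigned expected_keylen)
    {
        if (h->max_key_len != expected_keylen) {
            fprintf(stderr, "invalid max_key_len %u\n", h->max_key_len);
            return 0;
        }
        if (!is_power_of_2(h->node_size)) {
            fprintf(stderr, "invalid node_size %u\n", h->node_size);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct btree_header good = { 10, 4096 }, bad = { 10, 4000 };

        printf("%d %d\n", header_valid(&good, 10), header_valid(&bad, 10));
        return 0;
    }
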
index f6874acb2cf2a3a81a242f1983600eb912b9f607..8af45fc5b051abb3353501441fa9ea9030d9395a 100644 (file)
@@ -67,7 +67,7 @@ static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
        key->key_len = cpu_to_be16(6 + ustrlen);
 }
 
-static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
+void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
 {
        if (inode->i_flags & S_IMMUTABLE)
                perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
@@ -77,15 +77,24 @@ static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
                perms->rootflags |= HFSPLUS_FLG_APPEND;
        else
                perms->rootflags &= ~HFSPLUS_FLG_APPEND;
-       HFSPLUS_I(inode).rootflags = perms->rootflags;
-       HFSPLUS_I(inode).userflags = perms->userflags;
+
+       perms->userflags = HFSPLUS_I(inode)->userflags;
        perms->mode = cpu_to_be16(inode->i_mode);
        perms->owner = cpu_to_be32(inode->i_uid);
        perms->group = cpu_to_be32(inode->i_gid);
+
+       if (S_ISREG(inode->i_mode))
+               perms->dev = cpu_to_be32(inode->i_nlink);
+       else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode))
+               perms->dev = cpu_to_be32(inode->i_rdev);
+       else
+               perms->dev = 0;
 }
 
 static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+
        if (S_ISDIR(inode->i_mode)) {
                struct hfsplus_cat_folder *folder;
 
@@ -93,13 +102,13 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
                memset(folder, 0, sizeof(*folder));
                folder->type = cpu_to_be16(HFSPLUS_FOLDER);
                folder->id = cpu_to_be32(inode->i_ino);
-               HFSPLUS_I(inode).create_date =
+               HFSPLUS_I(inode)->create_date =
                        folder->create_date =
                        folder->content_mod_date =
                        folder->attribute_mod_date =
                        folder->access_date = hfsp_now2mt();
-               hfsplus_set_perms(inode, &folder->permissions);
-               if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
+               hfsplus_cat_set_perms(inode, &folder->permissions);
+               if (inode == sbi->hidden_dir)
                        /* invisible and namelocked */
                        folder->user_info.frFlags = cpu_to_be16(0x5000);
                return sizeof(*folder);
@@ -111,19 +120,19 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
                file->type = cpu_to_be16(HFSPLUS_FILE);
                file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
                file->id = cpu_to_be32(cnid);
-               HFSPLUS_I(inode).create_date =
+               HFSPLUS_I(inode)->create_date =
                        file->create_date =
                        file->content_mod_date =
                        file->attribute_mod_date =
                        file->access_date = hfsp_now2mt();
                if (cnid == inode->i_ino) {
-                       hfsplus_set_perms(inode, &file->permissions);
+                       hfsplus_cat_set_perms(inode, &file->permissions);
                        if (S_ISLNK(inode->i_mode)) {
                                file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
                                file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
                        } else {
-                               file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
-                               file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+                               file->user_info.fdType = cpu_to_be32(sbi->type);
+                               file->user_info.fdCreator = cpu_to_be32(sbi->creator);
                        }
                        if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
                                file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
@@ -131,8 +140,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
                        file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
                        file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
                        file->user_info.fdFlags = cpu_to_be16(0x100);
-                       file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date;
-                       file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
+                       file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date;
+                       file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid);
                }
                return sizeof(*file);
        }
@@ -180,15 +189,14 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
 
 int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
 {
+       struct super_block *sb = dir->i_sb;
        struct hfs_find_data fd;
-       struct super_block *sb;
        hfsplus_cat_entry entry;
        int entry_size;
        int err;
 
        dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
-       sb = dir->i_sb;
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
        hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
        entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
@@ -234,7 +242,7 @@ err2:
 
 int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
 {
-       struct super_block *sb;
+       struct super_block *sb = dir->i_sb;
        struct hfs_find_data fd;
        struct hfsplus_fork_raw fork;
        struct list_head *pos;
@@ -242,8 +250,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
        u16 type;
 
        dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
-       sb = dir->i_sb;
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
        if (!str) {
                int len;
@@ -279,7 +286,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
                hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
        }
 
-       list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) {
+       list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
                struct hfsplus_readdir_data *rd =
                        list_entry(pos, struct hfsplus_readdir_data, list);
                if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
@@ -312,7 +319,7 @@ int hfsplus_rename_cat(u32 cnid,
                       struct inode *src_dir, struct qstr *src_name,
                       struct inode *dst_dir, struct qstr *dst_name)
 {
-       struct super_block *sb;
+       struct super_block *sb = src_dir->i_sb;
        struct hfs_find_data src_fd, dst_fd;
        hfsplus_cat_entry entry;
        int entry_size, type;
@@ -320,8 +327,7 @@ int hfsplus_rename_cat(u32 cnid,
 
        dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
                dst_dir->i_ino, dst_name->name);
-       sb = src_dir->i_sb;
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
        dst_fd = src_fd;
 
        /* find the old dir entry and read the data */
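
Besides caching sb and sbi up front, the now-exported hfsplus_cat_set_perms() fills the on-disk dev field according to the inode type: link count for regular files, device number for block/char nodes, zero otherwise. A hedged sketch of that dispatch using the standard S_IS* macros (the struct here is illustrative):

    #include <stdio.h>
    #include <sys/stat.h>

    struct perm_rec { unsigned dev; };

    static void set_dev_field(struct perm_rec *p, mode_t mode,
                              unsigned nlink, unsigned rdev)
    {
        if (S_ISREG(mode))
            p->dev = nlink;     /* hard-link count for plain files */
        else if (S_ISBLK(mode) || S_ISCHR(mode))
            p->dev = rdev;      /* device number for device nodes */
        else
            p->dev = 0;
    }

    int main(void)
    {
        struct perm_rec p;

        set_dev_field(&p, S_IFREG | 0644, 2, 0);
        printf("dev field for a 2-link file: %u\n", p.dev);
        return 0;
    }
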
index 764fd1bdca882da08028e34b133930d9d155a364..d236d85ec9d73f703384ecaa9d6522fe7433c775 100644 (file)
@@ -39,7 +39,7 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
 
        dentry->d_op = &hfsplus_dentry_operations;
        dentry->d_fsdata = NULL;
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
        hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
 again:
        err = hfs_brec_read(&fd, &entry, sizeof(entry));
@@ -68,9 +68,9 @@ again:
                cnid = be32_to_cpu(entry.file.id);
                if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
                    entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
-                   (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date ||
-                    entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) &&
-                   HFSPLUS_SB(sb).hidden_dir) {
+                   (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->create_date ||
+                    entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)->create_date) &&
+                   HFSPLUS_SB(sb)->hidden_dir) {
                        struct qstr str;
                        char name[32];
 
@@ -86,7 +86,8 @@ again:
                                linkid = be32_to_cpu(entry.file.permissions.dev);
                                str.len = sprintf(name, "iNode%d", linkid);
                                str.name = name;
-                               hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
+                               hfsplus_cat_build_key(sb, fd.search_key,
+                                       HFSPLUS_SB(sb)->hidden_dir->i_ino, &str);
                                goto again;
                        }
                } else if (!dentry->d_fsdata)
@@ -101,7 +102,7 @@ again:
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (S_ISREG(inode->i_mode))
-               HFSPLUS_I(inode).dev = linkid;
+               HFSPLUS_I(inode)->linkid = linkid;
 out:
        d_add(dentry, inode);
        return NULL;
@@ -124,7 +125,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (filp->f_pos >= inode->i_size)
                return 0;
 
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
        hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
        err = hfs_brec_find(&fd);
        if (err)
@@ -180,8 +181,9 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                err = -EIO;
                                goto out;
                        }
-                       if (HFSPLUS_SB(sb).hidden_dir &&
-                           HFSPLUS_SB(sb).hidden_dir->i_ino == be32_to_cpu(entry.folder.id))
+                       if (HFSPLUS_SB(sb)->hidden_dir &&
+                           HFSPLUS_SB(sb)->hidden_dir->i_ino ==
+                                       be32_to_cpu(entry.folder.id))
                                goto next;
                        if (filldir(dirent, strbuf, len, filp->f_pos,
                                    be32_to_cpu(entry.folder.id), DT_DIR))
@@ -217,7 +219,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
                filp->private_data = rd;
                rd->file = filp;
-               list_add(&rd->list, &HFSPLUS_I(inode).open_dir_list);
+               list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
        }
        memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
 out:
@@ -229,38 +231,18 @@ static int hfsplus_dir_release(struct inode *inode, struct file *file)
 {
        struct hfsplus_readdir_data *rd = file->private_data;
        if (rd) {
+               mutex_lock(&inode->i_mutex);
                list_del(&rd->list);
+               mutex_unlock(&inode->i_mutex);
                kfree(rd);
        }
        return 0;
 }
 
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
-                         struct nameidata *nd)
-{
-       struct inode *inode;
-       int res;
-
-       inode = hfsplus_new_inode(dir->i_sb, mode);
-       if (!inode)
-               return -ENOSPC;
-
-       res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
-       if (res) {
-               inode->i_nlink = 0;
-               hfsplus_delete_inode(inode);
-               iput(inode);
-               return res;
-       }
-       hfsplus_instantiate(dentry, inode, inode->i_ino);
-       mark_inode_dirty(inode);
-       return 0;
-}
-
 static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
                        struct dentry *dst_dentry)
 {
-       struct super_block *sb = dst_dir->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb);
        struct inode *inode = src_dentry->d_inode;
        struct inode *src_dir = src_dentry->d_parent->d_inode;
        struct qstr str;
@@ -270,7 +252,10 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
 
        if (HFSPLUS_IS_RSRC(inode))
                return -EPERM;
+       if (!S_ISREG(inode->i_mode))
+               return -EPERM;
 
+       mutex_lock(&sbi->vh_mutex);
        if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) {
                for (;;) {
                        get_random_bytes(&id, sizeof(cnid));
@@ -279,40 +264,41 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
                        str.len = sprintf(name, "iNode%d", id);
                        res = hfsplus_rename_cat(inode->i_ino,
                                                 src_dir, &src_dentry->d_name,
-                                                HFSPLUS_SB(sb).hidden_dir, &str);
+                                                sbi->hidden_dir, &str);
                        if (!res)
                                break;
                        if (res != -EEXIST)
-                               return res;
+                               goto out;
                }
-               HFSPLUS_I(inode).dev = id;
-               cnid = HFSPLUS_SB(sb).next_cnid++;
+               HFSPLUS_I(inode)->linkid = id;
+               cnid = sbi->next_cnid++;
                src_dentry->d_fsdata = (void *)(unsigned long)cnid;
                res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode);
                if (res)
                        /* panic? */
-                       return res;
-               HFSPLUS_SB(sb).file_count++;
+                       goto out;
+               sbi->file_count++;
        }
-       cnid = HFSPLUS_SB(sb).next_cnid++;
+       cnid = sbi->next_cnid++;
        res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
        if (res)
-               return res;
+               goto out;
 
        inc_nlink(inode);
        hfsplus_instantiate(dst_dentry, inode, cnid);
        atomic_inc(&inode->i_count);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
-       HFSPLUS_SB(sb).file_count++;
-       sb->s_dirt = 1;
-
-       return 0;
+       sbi->file_count++;
+       dst_dir->i_sb->s_dirt = 1;
+out:
+       mutex_unlock(&sbi->vh_mutex);
+       return res;
 }
 
 static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
 {
-       struct super_block *sb = dir->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
        struct inode *inode = dentry->d_inode;
        struct qstr str;
        char name[32];
@@ -322,21 +308,22 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
        if (HFSPLUS_IS_RSRC(inode))
                return -EPERM;
 
+       mutex_lock(&sbi->vh_mutex);
        cnid = (u32)(unsigned long)dentry->d_fsdata;
        if (inode->i_ino == cnid &&
-           atomic_read(&HFSPLUS_I(inode).opencnt)) {
+           atomic_read(&HFSPLUS_I(inode)->opencnt)) {
                str.name = name;
                str.len = sprintf(name, "temp%lu", inode->i_ino);
                res = hfsplus_rename_cat(inode->i_ino,
                                         dir, &dentry->d_name,
-                                        HFSPLUS_SB(sb).hidden_dir, &str);
+                                        sbi->hidden_dir, &str);
                if (!res)
                        inode->i_flags |= S_DEAD;
-               return res;
+               goto out;
        }
        res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
        if (res)
-               return res;
+               goto out;
 
        if (inode->i_nlink > 0)
                drop_nlink(inode);
@@ -344,10 +331,10 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
                clear_nlink(inode);
        if (!inode->i_nlink) {
                if (inode->i_ino != cnid) {
-                       HFSPLUS_SB(sb).file_count--;
-                       if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
+                       sbi->file_count--;
+                       if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) {
                                res = hfsplus_delete_cat(inode->i_ino,
-                                                        HFSPLUS_SB(sb).hidden_dir,
+                                                        sbi->hidden_dir,
                                                         NULL);
                                if (!res)
                                        hfsplus_delete_inode(inode);
@@ -356,107 +343,108 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
                } else
                        hfsplus_delete_inode(inode);
        } else
-               HFSPLUS_SB(sb).file_count--;
+               sbi->file_count--;
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
-
+out:
+       mutex_unlock(&sbi->vh_mutex);
        return res;
 }
 
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
-       struct inode *inode;
-       int res;
-
-       inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode);
-       if (!inode)
-               return -ENOSPC;
-
-       res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
-       if (res) {
-               inode->i_nlink = 0;
-               hfsplus_delete_inode(inode);
-               iput(inode);
-               return res;
-       }
-       hfsplus_instantiate(dentry, inode, inode->i_ino);
-       mark_inode_dirty(inode);
-       return 0;
-}
-
 static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
 {
-       struct inode *inode;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+       struct inode *inode = dentry->d_inode;
        int res;
 
-       inode = dentry->d_inode;
        if (inode->i_size != 2)
                return -ENOTEMPTY;
+
+       mutex_lock(&sbi->vh_mutex);
        res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
        if (res)
-               return res;
+               goto out;
        clear_nlink(inode);
        inode->i_ctime = CURRENT_TIME_SEC;
        hfsplus_delete_inode(inode);
        mark_inode_dirty(inode);
-       return 0;
+out:
+       mutex_unlock(&sbi->vh_mutex);
+       return res;
 }
 
 static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
                           const char *symname)
 {
-       struct super_block *sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
        struct inode *inode;
-       int res;
+       int res = -ENOSPC;
 
-       sb = dir->i_sb;
-       inode = hfsplus_new_inode(sb, S_IFLNK | S_IRWXUGO);
+       mutex_lock(&sbi->vh_mutex);
+       inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
        if (!inode)
-               return -ENOSPC;
+               goto out;
 
        res = page_symlink(inode, symname, strlen(symname) + 1);
-       if (res) {
-               inode->i_nlink = 0;
-               hfsplus_delete_inode(inode);
-               iput(inode);
-               return res;
-       }
+       if (res)
+               goto out_err;
 
-       mark_inode_dirty(inode);
        res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+       if (res)
+               goto out_err;
 
-       if (!res) {
-               hfsplus_instantiate(dentry, inode, inode->i_ino);
-               mark_inode_dirty(inode);
-       }
+       hfsplus_instantiate(dentry, inode, inode->i_ino);
+       mark_inode_dirty(inode);
+       goto out;
 
+out_err:
+       inode->i_nlink = 0;
+       hfsplus_delete_inode(inode);
+       iput(inode);
+out:
+       mutex_unlock(&sbi->vh_mutex);
        return res;
 }
 
 static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
                         int mode, dev_t rdev)
 {
-       struct super_block *sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
        struct inode *inode;
-       int res;
+       int res = -ENOSPC;
 
-       sb = dir->i_sb;
-       inode = hfsplus_new_inode(sb, mode);
+       mutex_lock(&sbi->vh_mutex);
+       inode = hfsplus_new_inode(dir->i_sb, mode);
        if (!inode)
-               return -ENOSPC;
+               goto out;
+
+       if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode))
+               init_special_inode(inode, mode, rdev);
 
        res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
        if (res) {
                inode->i_nlink = 0;
                hfsplus_delete_inode(inode);
                iput(inode);
-               return res;
+               goto out;
        }
-       init_special_inode(inode, mode, rdev);
+
        hfsplus_instantiate(dentry, inode, inode->i_ino);
        mark_inode_dirty(inode);
+out:
+       mutex_unlock(&sbi->vh_mutex);
+       return res;
+}
 
-       return 0;
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+                         struct nameidata *nd)
+{
+       return hfsplus_mknod(dir, dentry, mode, 0);
+}
+
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+       return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
 }
 
 static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -466,7 +454,10 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               res = hfsplus_unlink(new_dir, new_dentry);
+               if (S_ISDIR(new_dentry->d_inode->i_mode))
+                       res = hfsplus_rmdir(new_dir, new_dentry);
+               else
+                       res = hfsplus_unlink(new_dir, new_dentry);
                if (res)
                        return res;
        }
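
At the end of this dir.c diff, hfsplus_create() and hfsplus_mkdir() collapse into one-line wrappers around hfsplus_mknod(), so inode allocation, catalog insertion, and the new vh_mutex locking live in a single helper. A minimal sketch of that consolidation (stand-in functions, not the kernel entry points):

    #include <stdio.h>
    #include <sys/stat.h>

    /* one shared implementation: in the kernel this allocates the inode,
     * builds the catalog record and instantiates the dentry, all under
     * sbi->vh_mutex */
    static int demo_mknod(const char *name, mode_t mode, dev_t rdev)
    {
        printf("mknod %s mode=%o rdev=%lu\n",
               name, (unsigned)mode, (unsigned long)rdev);
        return 0;
    }

    static int demo_create(const char *name, mode_t mode)
    {
        return demo_mknod(name, mode, 0);
    }

    static int demo_mkdir(const char *name, mode_t mode)
    {
        return demo_mknod(name, mode | S_IFDIR, 0);
    }

    int main(void)
    {
        return demo_create("file", 0644) | demo_mkdir("dir", 0755);
    }
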
index 0022eec63cdacd97c2a438b8d9f623ff6be88dd4..0c9cb1820a523fae02c6bf2f37e6fbdc5c5dceeb 100644 (file)
@@ -85,35 +85,49 @@ static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
 
 static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
 {
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res;
 
-       hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
-                             HFSPLUS_IS_RSRC(inode) ?  HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+       WARN_ON(!mutex_is_locked(&hip->extents_lock));
+
+       hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
+                             HFSPLUS_IS_RSRC(inode) ?
+                               HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+
        res = hfs_brec_find(fd);
-       if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
+       if (hip->flags & HFSPLUS_FLG_EXT_NEW) {
                if (res != -ENOENT)
                        return;
-               hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
-               HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+               hfs_brec_insert(fd, hip->cached_extents,
+                               sizeof(hfsplus_extent_rec));
+               hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
        } else {
                if (res)
                        return;
-               hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
-               HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
+               hfs_bnode_write(fd->bnode, hip->cached_extents,
+                               fd->entryoffset, fd->entrylength);
+               hip->flags &= ~HFSPLUS_FLG_EXT_DIRTY;
        }
 }
 
-void hfsplus_ext_write_extent(struct inode *inode)
+static void hfsplus_ext_write_extent_locked(struct inode *inode)
 {
-       if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
+       if (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_EXT_DIRTY) {
                struct hfs_find_data fd;
 
-               hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
+               hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
                __hfsplus_ext_write_extent(inode, &fd);
                hfs_find_exit(&fd);
        }
 }
 
+void hfsplus_ext_write_extent(struct inode *inode)
+{
+       mutex_lock(&HFSPLUS_I(inode)->extents_lock);
+       hfsplus_ext_write_extent_locked(inode);
+       mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
+}
+
 static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
                                            struct hfsplus_extent *extent,
                                            u32 cnid, u32 block, u8 type)
@@ -136,33 +150,39 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
 
 static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
 {
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res;
 
-       if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
+       WARN_ON(!mutex_is_locked(&hip->extents_lock));
+
+       if (hip->flags & HFSPLUS_FLG_EXT_DIRTY)
                __hfsplus_ext_write_extent(inode, fd);
 
-       res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
-                                       block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+       res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
+                                       block, HFSPLUS_IS_RSRC(inode) ?
+                                               HFSPLUS_TYPE_RSRC :
+                                               HFSPLUS_TYPE_DATA);
        if (!res) {
-               HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
-               HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
+               hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
+               hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents);
        } else {
-               HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
-               HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+               hip->cached_start = hip->cached_blocks = 0;
+               hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
        }
        return res;
 }
 
 static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
 {
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfs_find_data fd;
        int res;
 
-       if (block >= HFSPLUS_I(inode).cached_start &&
-           block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
+       if (block >= hip->cached_start &&
+           block < hip->cached_start + hip->cached_blocks)
                return 0;
 
-       hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
        res = __hfsplus_ext_cache_extent(&fd, inode, block);
        hfs_find_exit(&fd);
        return res;
@@ -172,21 +192,21 @@ static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
 int hfsplus_get_block(struct inode *inode, sector_t iblock,
                      struct buffer_head *bh_result, int create)
 {
-       struct super_block *sb;
+       struct super_block *sb = inode->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        int res = -EIO;
        u32 ablock, dblock, mask;
        int shift;
 
-       sb = inode->i_sb;
-
        /* Convert inode block to disk allocation block */
-       shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
-       ablock = iblock >> HFSPLUS_SB(sb).fs_shift;
+       shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
+       ablock = iblock >> sbi->fs_shift;
 
-       if (iblock >= HFSPLUS_I(inode).fs_blocks) {
-               if (iblock > HFSPLUS_I(inode).fs_blocks || !create)
+       if (iblock >= hip->fs_blocks) {
+               if (iblock > hip->fs_blocks || !create)
                        return -EIO;
-               if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
+               if (ablock >= hip->alloc_blocks) {
                        res = hfsplus_file_extend(inode);
                        if (res)
                                return res;
@@ -194,33 +214,33 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
        } else
                create = 0;
 
-       if (ablock < HFSPLUS_I(inode).first_blocks) {
-               dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
+       if (ablock < hip->first_blocks) {
+               dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
                goto done;
        }
 
        if (inode->i_ino == HFSPLUS_EXT_CNID)
                return -EIO;
 
-       mutex_lock(&HFSPLUS_I(inode).extents_lock);
+       mutex_lock(&hip->extents_lock);
        res = hfsplus_ext_read_extent(inode, ablock);
        if (!res) {
-               dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
-                                            HFSPLUS_I(inode).cached_start);
+               dblock = hfsplus_ext_find_block(hip->cached_extents,
+                                               ablock - hip->cached_start);
        } else {
-               mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+               mutex_unlock(&hip->extents_lock);
                return -EIO;
        }
-       mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+       mutex_unlock(&hip->extents_lock);
 
 done:
        dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
-       mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
-       map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
+       mask = (1 << sbi->fs_shift) - 1;
+       map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask));
        if (create) {
                set_buffer_new(bh_result);
-               HFSPLUS_I(inode).phys_size += sb->s_blocksize;
-               HFSPLUS_I(inode).fs_blocks++;
+               hip->phys_size += sb->s_blocksize;
+               hip->fs_blocks++;
                inode_add_bytes(inode, sb->s_blocksize);
                mark_inode_dirty(inode);
        }
@@ -327,7 +347,7 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw
        if (total_blocks == blocks)
                return 0;
 
-       hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
        do {
                res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
                                                total_blocks, type);
@@ -348,29 +368,33 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw
 int hfsplus_file_extend(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 start, len, goal;
        int res;
 
-       if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
+       if (sbi->alloc_file->i_size * 8 <
+           sbi->total_blocks - sbi->free_blocks + 8) {
                // extend alloc file
-               printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
-                       HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
+               printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n",
+                               sbi->alloc_file->i_size * 8,
+                               sbi->total_blocks, sbi->free_blocks);
                return -ENOSPC;
        }
 
-       mutex_lock(&HFSPLUS_I(inode).extents_lock);
-       if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
-               goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
+       mutex_lock(&hip->extents_lock);
+       if (hip->alloc_blocks == hip->first_blocks)
+               goal = hfsplus_ext_lastblock(hip->first_extents);
        else {
-               res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks);
+               res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
                if (res)
                        goto out;
-               goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
+               goal = hfsplus_ext_lastblock(hip->cached_extents);
        }
 
-       len = HFSPLUS_I(inode).clump_blocks;
-       start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
-       if (start >= HFSPLUS_SB(sb).total_blocks) {
+       len = hip->clump_blocks;
+       start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
+       if (start >= sbi->total_blocks) {
                start = hfsplus_block_allocate(sb, goal, 0, &len);
                if (start >= goal) {
                        res = -ENOSPC;
@@ -379,56 +403,56 @@ int hfsplus_file_extend(struct inode *inode)
        }
 
        dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
-       if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
-               if (!HFSPLUS_I(inode).first_blocks) {
+
+       if (hip->alloc_blocks <= hip->first_blocks) {
+               if (!hip->first_blocks) {
                        dprint(DBG_EXTENT, "first extents\n");
                        /* no extents yet */
-                       HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
-                       HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
+                       hip->first_extents[0].start_block = cpu_to_be32(start);
+                       hip->first_extents[0].block_count = cpu_to_be32(len);
                        res = 0;
                } else {
                        /* try to append to extents in inode */
-                       res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
-                                                HFSPLUS_I(inode).alloc_blocks,
+                       res = hfsplus_add_extent(hip->first_extents,
+                                                hip->alloc_blocks,
                                                 start, len);
                        if (res == -ENOSPC)
                                goto insert_extent;
                }
                if (!res) {
-                       hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
-                       HFSPLUS_I(inode).first_blocks += len;
+                       hfsplus_dump_extent(hip->first_extents);
+                       hip->first_blocks += len;
                }
        } else {
-               res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
-                                        HFSPLUS_I(inode).alloc_blocks -
-                                        HFSPLUS_I(inode).cached_start,
+               res = hfsplus_add_extent(hip->cached_extents,
+                                        hip->alloc_blocks - hip->cached_start,
                                         start, len);
                if (!res) {
-                       hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
-                       HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
-                       HFSPLUS_I(inode).cached_blocks += len;
+                       hfsplus_dump_extent(hip->cached_extents);
+                       hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
+                       hip->cached_blocks += len;
                } else if (res == -ENOSPC)
                        goto insert_extent;
        }
 out:
-       mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+       mutex_unlock(&hip->extents_lock);
        if (!res) {
-               HFSPLUS_I(inode).alloc_blocks += len;
+               hip->alloc_blocks += len;
                mark_inode_dirty(inode);
        }
        return res;
 
 insert_extent:
        dprint(DBG_EXTENT, "insert new extent\n");
-       hfsplus_ext_write_extent(inode);
+       hfsplus_ext_write_extent_locked(inode);
 
-       memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
-       HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
-       HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
-       hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
-       HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
-       HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
-       HFSPLUS_I(inode).cached_blocks = len;
+       memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+       hip->cached_extents[0].start_block = cpu_to_be32(start);
+       hip->cached_extents[0].block_count = cpu_to_be32(len);
+       hfsplus_dump_extent(hip->cached_extents);
+       hip->flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
+       hip->cached_start = hip->alloc_blocks;
+       hip->cached_blocks = len;
 
        res = 0;
        goto out;
@@ -437,13 +461,15 @@ insert_extent:
 void hfsplus_file_truncate(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfs_find_data fd;
        u32 alloc_cnt, blk_cnt, start;
        int res;
 
-       dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
-              (long long)HFSPLUS_I(inode).phys_size, inode->i_size);
-       if (inode->i_size > HFSPLUS_I(inode).phys_size) {
+       dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n",
+               inode->i_ino, (long long)hip->phys_size, inode->i_size);
+
+       if (inode->i_size > hip->phys_size) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
@@ -460,47 +486,48 @@ void hfsplus_file_truncate(struct inode *inode)
                        return;
                mark_inode_dirty(inode);
                return;
-       } else if (inode->i_size == HFSPLUS_I(inode).phys_size)
+       } else if (inode->i_size == hip->phys_size)
                return;
 
-       blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-       alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
+       blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
+                       HFSPLUS_SB(sb)->alloc_blksz_shift;
+       alloc_cnt = hip->alloc_blocks;
        if (blk_cnt == alloc_cnt)
                goto out;
 
-       mutex_lock(&HFSPLUS_I(inode).extents_lock);
-       hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+       mutex_lock(&hip->extents_lock);
+       hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
        while (1) {
-               if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
-                       hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents,
+               if (alloc_cnt == hip->first_blocks) {
+                       hfsplus_free_extents(sb, hip->first_extents,
                                             alloc_cnt, alloc_cnt - blk_cnt);
-                       hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
-                       HFSPLUS_I(inode).first_blocks = blk_cnt;
+                       hfsplus_dump_extent(hip->first_extents);
+                       hip->first_blocks = blk_cnt;
                        break;
                }
                res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
                if (res)
                        break;
-               start = HFSPLUS_I(inode).cached_start;
-               hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents,
+               start = hip->cached_start;
+               hfsplus_free_extents(sb, hip->cached_extents,
                                     alloc_cnt - start, alloc_cnt - blk_cnt);
-               hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
+               hfsplus_dump_extent(hip->cached_extents);
                if (blk_cnt > start) {
-                       HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
+                       hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
                        break;
                }
                alloc_cnt = start;
-               HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
-               HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+               hip->cached_start = hip->cached_blocks = 0;
+               hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
                hfs_brec_remove(&fd);
        }
        hfs_find_exit(&fd);
-       mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+       mutex_unlock(&hip->extents_lock);
 
-       HFSPLUS_I(inode).alloc_blocks = blk_cnt;
+       hip->alloc_blocks = blk_cnt;
 out:
-       HFSPLUS_I(inode).phys_size = inode->i_size;
-       HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
-       inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
+       hip->phys_size = inode->i_size;
+       hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+       inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
        mark_inode_dirty(inode);
 }
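
extents.c now follows the common locked/unlocked split: hfsplus_ext_write_extent() takes extents_lock and delegates to a _locked variant that asserts the lock is held via WARN_ON(!mutex_is_locked(...)), so internal callers such as the insert_extent path in hfsplus_file_extend() can call it directly without deadlocking. A userspace sketch with pthreads; the trylock probe below is only a crude stand-in for mutex_is_locked():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t extents_lock = PTHREAD_MUTEX_INITIALIZER;
    static int extent_dirty = 1;

    static void write_extent_locked(void)
    {
        /* trylock succeeding means the caller did NOT hold the lock */
        if (pthread_mutex_trylock(&extents_lock) == 0) {
            pthread_mutex_unlock(&extents_lock);
            fprintf(stderr, "called without extents_lock held\n");
            return;
        }
        if (extent_dirty) {
            printf("flushing cached extent record\n");
            extent_dirty = 0;
        }
    }

    static void write_extent(void)
    {
        pthread_mutex_lock(&extents_lock);
        write_extent_locked();
        pthread_mutex_unlock(&extents_lock);
    }

    int main(void)
    {
        write_extent();
        return 0;
    }
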
index dc856be3c2b010854c78da1049b86d2f7ce48147..cb3653efb57a2dcf2285a19fcb7262cb7a1ba509 100644 (file)
@@ -62,7 +62,7 @@ struct hfs_btree {
        unsigned int depth;
 
        //unsigned int map1_size, map_size;
-       struct semaphore tree_lock;
+       struct mutex tree_lock;
 
        unsigned int pages_per_bnode;
        spinlock_t hash_lock;
@@ -121,16 +121,21 @@ struct hfsplus_sb_info {
        u32 sect_count;
        int fs_shift;
 
-       /* Stuff in host order from Vol Header */
+       /* immutable data from the volume header */
        u32 alloc_blksz;
        int alloc_blksz_shift;
        u32 total_blocks;
+       u32 data_clump_blocks, rsrc_clump_blocks;
+
+       /* mutable data from the volume header, protected by alloc_mutex */
        u32 free_blocks;
-       u32 next_alloc;
+       struct mutex alloc_mutex;
+
+       /* mutable data from the volume header, protected by vh_mutex */
        u32 next_cnid;
        u32 file_count;
        u32 folder_count;
-       u32 data_clump_blocks, rsrc_clump_blocks;
+       struct mutex vh_mutex;
 
        /* Config options */
        u32 creator;
@@ -143,40 +148,50 @@ struct hfsplus_sb_info {
        int part, session;
 
        unsigned long flags;
-
-       struct hlist_head rsrc_inodes;
 };
 
-#define HFSPLUS_SB_WRITEBACKUP 0x0001
-#define HFSPLUS_SB_NODECOMPOSE 0x0002
-#define HFSPLUS_SB_FORCE       0x0004
-#define HFSPLUS_SB_HFSX                0x0008
-#define HFSPLUS_SB_CASEFOLD    0x0010
+#define HFSPLUS_SB_WRITEBACKUP 0
+#define HFSPLUS_SB_NODECOMPOSE 1
+#define HFSPLUS_SB_FORCE       2
+#define HFSPLUS_SB_HFSX                3
+#define HFSPLUS_SB_CASEFOLD    4
 
 
 struct hfsplus_inode_info {
-       struct mutex extents_lock;
-       u32 clump_blocks, alloc_blocks;
-       sector_t fs_blocks;
-       /* Allocation extents from catalog record or volume header */
-       hfsplus_extent_rec first_extents;
-       u32 first_blocks;
-       hfsplus_extent_rec cached_extents;
-       u32 cached_start, cached_blocks;
        atomic_t opencnt;
 
-       struct inode *rsrc_inode;
+       /*
+        * Extent allocation information, protected by extents_lock.
+        */
+       u32 first_blocks;
+       u32 clump_blocks;
+       u32 alloc_blocks;
+       u32 cached_start;
+       u32 cached_blocks;
+       hfsplus_extent_rec first_extents;
+       hfsplus_extent_rec cached_extents;
        unsigned long flags;
+       struct mutex extents_lock;
 
+       /*
+        * Immutable data.
+        */
+       struct inode *rsrc_inode;
        __be32 create_date;
-       /* Device number in hfsplus_permissions in catalog */
-       u32 dev;
-       /* BSD system and user file flags */
-       u8 rootflags;
-       u8 userflags;
 
+       /*
+        * Protected by sbi->vh_mutex.
+        */
+       u32 linkid;
+
+       /*
+        * Protected by i_mutex.
+        */
+       sector_t fs_blocks;
+       u8 userflags;           /* BSD user file flags */
        struct list_head open_dir_list;
        loff_t phys_size;
+
        struct inode vfs_inode;
 };
 
@@ -184,8 +199,8 @@ struct hfsplus_inode_info {
 #define HFSPLUS_FLG_EXT_DIRTY  0x0002
 #define HFSPLUS_FLG_EXT_NEW    0x0004
 
-#define HFSPLUS_IS_DATA(inode)   (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC))
-#define HFSPLUS_IS_RSRC(inode)   (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)
+#define HFSPLUS_IS_DATA(inode)   (!(HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC))
+#define HFSPLUS_IS_RSRC(inode)   (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC)
 
 struct hfs_find_data {
        /* filled by caller */
@@ -311,6 +326,7 @@ int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
 int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
 int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
                       struct inode *, struct qstr *);
+void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
 
 /* dir.c */
 extern const struct inode_operations hfsplus_dir_inode_operations;
@@ -372,26 +388,15 @@ int hfsplus_read_wrapper(struct super_block *);
 int hfs_part_find(struct super_block *, sector_t *, sector_t *);
 
 /* access macros */
-/*
 static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
 {
        return sb->s_fs_info;
 }
+
 static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
 {
        return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
 }
-*/
-#define HFSPLUS_SB(super)      (*(struct hfsplus_sb_info *)(super)->s_fs_info)
-#define HFSPLUS_I(inode)       (*list_entry(inode, struct hfsplus_inode_info, vfs_inode))
-
-#if 1
-#define hfsplus_kmap(p)                ({ struct page *__p = (p); kmap(__p); })
-#define hfsplus_kunmap(p)      ({ struct page *__p = (p); kunmap(__p); __p; })
-#else
-#define hfsplus_kmap(p)                kmap(p)
-#define hfsplus_kunmap(p)      kunmap(p)
-#endif
 
 #define sb_bread512(sb, sec, data) ({                  \
        struct buffer_head *__bh;                       \
@@ -419,6 +424,4 @@ static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
 #define hfsp_ut2mt(t)          __hfsp_ut2mt((t).tv_sec)
 #define hfsp_now2mt()          __hfsp_ut2mt(get_seconds())
 
-#define kdev_t_to_nr(x)                (x)
-
 #endif
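
The commented-out inline accessors are resurrected here and the struct-valued macros dropped, which is why every HFSPLUS_SB(sb).field and HFSPLUS_I(inode).field elsewhere in the series becomes a pointer dereference. HFSPLUS_I() is the classic container_of idiom; a standalone illustration with demo names, not the kernel headers:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inode { long i_ino; };

    struct inode_info_demo {
        unsigned long flags;
        struct inode vfs_inode;     /* VFS inode embedded in fs data */
    };

    static struct inode_info_demo *INODE_INFO(struct inode *inode)
    {
        return container_of(inode, struct inode_info_demo, vfs_inode);
    }

    int main(void)
    {
        struct inode_info_demo info = { .flags = 4, .vfs_inode = { 7 } };

        printf("flags via vfs inode: %lu\n",
               INODE_INFO(&info.vfs_inode)->flags);
        return 0;
    }
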
index fe99fe8db61a3cb279885cee2c73cf5743704604..6892899fd6fbbabce55d3fe1f2c8915fe3f33d89 100644 (file)
@@ -200,6 +200,7 @@ struct hfsplus_cat_key {
        struct hfsplus_unistr name;
 } __packed;
 
+#define HFSPLUS_CAT_KEYLEN     (sizeof(struct hfsplus_cat_key))
 
 /* Structs from hfs.h */
 struct hfsp_point {
@@ -323,7 +324,7 @@ struct hfsplus_ext_key {
        __be32 start_block;
 } __packed;
 
-#define HFSPLUS_EXT_KEYLEN 12
+#define HFSPLUS_EXT_KEYLEN     sizeof(struct hfsplus_ext_key)
 
 /* HFS+ generic BTree key */
 typedef union {
index c5a979d62c657a866685dac4743fa01515b1653a..78449280dae08471958a328afca7f225fe9d744f 100644 (file)
@@ -36,7 +36,7 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                hfsplus_get_block,
-                               &HFSPLUS_I(mapping->host).phys_size);
+                               &HFSPLUS_I(mapping->host)->phys_size);
        if (unlikely(ret)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
@@ -62,13 +62,13 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 
        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
-               tree = HFSPLUS_SB(sb).ext_tree;
+               tree = HFSPLUS_SB(sb)->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
-               tree = HFSPLUS_SB(sb).cat_tree;
+               tree = HFSPLUS_SB(sb)->cat_tree;
                break;
        case HFSPLUS_ATTR_CNID:
-               tree = HFSPLUS_SB(sb).attr_tree;
+               tree = HFSPLUS_SB(sb)->attr_tree;
                break;
        default:
                BUG();
@@ -172,12 +172,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
        struct hfs_find_data fd;
        struct super_block *sb = dir->i_sb;
        struct inode *inode = NULL;
+       struct hfsplus_inode_info *hip;
        int err;
 
        if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
                goto out;
 
-       inode = HFSPLUS_I(dir).rsrc_inode;
+       inode = HFSPLUS_I(dir)->rsrc_inode;
        if (inode)
                goto out;
 
@@ -185,12 +186,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
+       hip = HFSPLUS_I(inode);
        inode->i_ino = dir->i_ino;
-       INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
-       mutex_init(&HFSPLUS_I(inode).extents_lock);
-       HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;
+       INIT_LIST_HEAD(&hip->open_dir_list);
+       mutex_init(&hip->extents_lock);
+       hip->flags = HFSPLUS_FLG_RSRC;
 
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
        err = hfsplus_find_cat(sb, dir->i_ino, &fd);
        if (!err)
                err = hfsplus_cat_read_inode(inode, &fd);
@@ -199,10 +201,18 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
                iput(inode);
                return ERR_PTR(err);
        }
-       HFSPLUS_I(inode).rsrc_inode = dir;
-       HFSPLUS_I(dir).rsrc_inode = inode;
+       hip->rsrc_inode = dir;
+       HFSPLUS_I(dir)->rsrc_inode = inode;
        igrab(dir);
-       hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
+
+       /*
+        * __mark_inode_dirty expects inodes to be hashed.  Since we don't
+        * want resource fork inodes in the regular inode space, we make them
+        * appear hashed, but do not put on any lists.  hlist_del_init()
+        * will work fine and require no locking.
+        */
+       inode->i_hash.pprev = &inode->i_hash.next;
+
        mark_inode_dirty(inode);
 out:
        d_add(dentry, inode);
@@ -211,30 +221,27 @@ out:
 
 static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
 {
-       struct super_block *sb = inode->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        u16 mode;
 
        mode = be16_to_cpu(perms->mode);
 
        inode->i_uid = be32_to_cpu(perms->owner);
        if (!inode->i_uid && !mode)
-               inode->i_uid = HFSPLUS_SB(sb).uid;
+               inode->i_uid = sbi->uid;
 
        inode->i_gid = be32_to_cpu(perms->group);
        if (!inode->i_gid && !mode)
-               inode->i_gid = HFSPLUS_SB(sb).gid;
+               inode->i_gid = sbi->gid;
 
        if (dir) {
-               mode = mode ? (mode & S_IALLUGO) :
-                       (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
+               mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
                mode |= S_IFDIR;
        } else if (!mode)
-               mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
-                       ~(HFSPLUS_SB(sb).umask));
+               mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
        inode->i_mode = mode;
 
-       HFSPLUS_I(inode).rootflags = perms->rootflags;
-       HFSPLUS_I(inode).userflags = perms->userflags;
+       HFSPLUS_I(inode)->userflags = perms->userflags;
        if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
@@ -245,30 +252,13 @@ static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, i
                inode->i_flags &= ~S_APPEND;
 }
 
-static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
-{
-       if (inode->i_flags & S_IMMUTABLE)
-               perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
-       else
-               perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
-       if (inode->i_flags & S_APPEND)
-               perms->rootflags |= HFSPLUS_FLG_APPEND;
-       else
-               perms->rootflags &= ~HFSPLUS_FLG_APPEND;
-       perms->userflags = HFSPLUS_I(inode).userflags;
-       perms->mode = cpu_to_be16(inode->i_mode);
-       perms->owner = cpu_to_be32(inode->i_uid);
-       perms->group = cpu_to_be32(inode->i_gid);
-       perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
-}
-
 static int hfsplus_file_open(struct inode *inode, struct file *file)
 {
        if (HFSPLUS_IS_RSRC(inode))
-               inode = HFSPLUS_I(inode).rsrc_inode;
+               inode = HFSPLUS_I(inode)->rsrc_inode;
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EOVERFLOW;
-       atomic_inc(&HFSPLUS_I(inode).opencnt);
+       atomic_inc(&HFSPLUS_I(inode)->opencnt);
        return 0;
 }
 
@@ -277,12 +267,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
        struct super_block *sb = inode->i_sb;
 
        if (HFSPLUS_IS_RSRC(inode))
-               inode = HFSPLUS_I(inode).rsrc_inode;
-       if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
+               inode = HFSPLUS_I(inode)->rsrc_inode;
+       if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
                mutex_lock(&inode->i_mutex);
                hfsplus_file_truncate(inode);
                if (inode->i_flags & S_DEAD) {
-                       hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
+                       hfsplus_delete_cat(inode->i_ino,
+                                          HFSPLUS_SB(sb)->hidden_dir, NULL);
                        hfsplus_delete_inode(inode);
                }
                mutex_unlock(&inode->i_mutex);
@@ -361,47 +352,52 @@ static const struct file_operations hfsplus_file_operations = {
 
 struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct inode *inode = new_inode(sb);
+       struct hfsplus_inode_info *hip;
+
        if (!inode)
                return NULL;
 
-       inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
+       inode->i_ino = sbi->next_cnid++;
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_nlink = 1;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-       INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
-       mutex_init(&HFSPLUS_I(inode).extents_lock);
-       atomic_set(&HFSPLUS_I(inode).opencnt, 0);
-       HFSPLUS_I(inode).flags = 0;
-       memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
-       memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
-       HFSPLUS_I(inode).alloc_blocks = 0;
-       HFSPLUS_I(inode).first_blocks = 0;
-       HFSPLUS_I(inode).cached_start = 0;
-       HFSPLUS_I(inode).cached_blocks = 0;
-       HFSPLUS_I(inode).phys_size = 0;
-       HFSPLUS_I(inode).fs_blocks = 0;
-       HFSPLUS_I(inode).rsrc_inode = NULL;
+
+       hip = HFSPLUS_I(inode);
+       INIT_LIST_HEAD(&hip->open_dir_list);
+       mutex_init(&hip->extents_lock);
+       atomic_set(&hip->opencnt, 0);
+       hip->flags = 0;
+       memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
+       memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+       hip->alloc_blocks = 0;
+       hip->first_blocks = 0;
+       hip->cached_start = 0;
+       hip->cached_blocks = 0;
+       hip->phys_size = 0;
+       hip->fs_blocks = 0;
+       hip->rsrc_inode = NULL;
        if (S_ISDIR(inode->i_mode)) {
                inode->i_size = 2;
-               HFSPLUS_SB(sb).folder_count++;
+               sbi->folder_count++;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (S_ISREG(inode->i_mode)) {
-               HFSPLUS_SB(sb).file_count++;
+               sbi->file_count++;
                inode->i_op = &hfsplus_file_inode_operations;
                inode->i_fop = &hfsplus_file_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
-               HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
+               hip->clump_blocks = sbi->data_clump_blocks;
        } else if (S_ISLNK(inode->i_mode)) {
-               HFSPLUS_SB(sb).file_count++;
+               sbi->file_count++;
                inode->i_op = &page_symlink_inode_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
-               HFSPLUS_I(inode).clump_blocks = 1;
+               hip->clump_blocks = 1;
        } else
-               HFSPLUS_SB(sb).file_count++;
+               sbi->file_count++;
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
        sb->s_dirt = 1;
@@ -414,11 +410,11 @@ void hfsplus_delete_inode(struct inode *inode)
        struct super_block *sb = inode->i_sb;
 
        if (S_ISDIR(inode->i_mode)) {
-               HFSPLUS_SB(sb).folder_count--;
+               HFSPLUS_SB(sb)->folder_count--;
                sb->s_dirt = 1;
                return;
        }
-       HFSPLUS_SB(sb).file_count--;
+       HFSPLUS_SB(sb)->file_count--;
        if (S_ISREG(inode->i_mode)) {
                if (!inode->i_nlink) {
                        inode->i_size = 0;
@@ -434,34 +430,39 @@ void hfsplus_delete_inode(struct inode *inode)
 void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
 {
        struct super_block *sb = inode->i_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 count;
        int i;
 
-       memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
-              sizeof(hfsplus_extent_rec));
+       memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
        for (count = 0, i = 0; i < 8; i++)
                count += be32_to_cpu(fork->extents[i].block_count);
-       HFSPLUS_I(inode).first_blocks = count;
-       memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
-       HFSPLUS_I(inode).cached_start = 0;
-       HFSPLUS_I(inode).cached_blocks = 0;
-
-       HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
-       inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
-       HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
-       inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
-       HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-       if (!HFSPLUS_I(inode).clump_blocks)
-               HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
-                               HFSPLUS_SB(sb).data_clump_blocks;
+       hip->first_blocks = count;
+       memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+       hip->cached_start = 0;
+       hip->cached_blocks = 0;
+
+       hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
+       hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
+       hip->fs_blocks =
+               (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+       inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
+       hip->clump_blocks =
+               be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
+       if (!hip->clump_blocks) {
+               hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
+                       sbi->rsrc_clump_blocks :
+                       sbi->data_clump_blocks;
+       }
 }
 
 void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
 {
-       memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
+       memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
               sizeof(hfsplus_extent_rec));
        fork->total_size = cpu_to_be64(inode->i_size);
-       fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
+       fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
 }
 
 int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
@@ -472,7 +473,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
 
        type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
 
-       HFSPLUS_I(inode).dev = 0;
+       HFSPLUS_I(inode)->linkid = 0;
        if (type == HFSPLUS_FOLDER) {
                struct hfsplus_cat_folder *folder = &entry.folder;
 
@@ -486,8 +487,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
                inode->i_atime = hfsp_mt2ut(folder->access_date);
                inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
-               HFSPLUS_I(inode).create_date = folder->create_date;
-               HFSPLUS_I(inode).fs_blocks = 0;
+               HFSPLUS_I(inode)->create_date = folder->create_date;
+               HFSPLUS_I(inode)->fs_blocks = 0;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (type == HFSPLUS_FILE) {
@@ -518,7 +519,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
                inode->i_atime = hfsp_mt2ut(file->access_date);
                inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
-               HFSPLUS_I(inode).create_date = file->create_date;
+               HFSPLUS_I(inode)->create_date = file->create_date;
        } else {
                printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
                res = -EIO;
@@ -533,12 +534,12 @@ int hfsplus_cat_write_inode(struct inode *inode)
        hfsplus_cat_entry entry;
 
        if (HFSPLUS_IS_RSRC(inode))
-               main_inode = HFSPLUS_I(inode).rsrc_inode;
+               main_inode = HFSPLUS_I(inode)->rsrc_inode;
 
        if (!main_inode->i_nlink)
                return 0;
 
-       if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
+       if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
                /* panic? */
                return -EIO;
 
@@ -554,7 +555,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_folder));
                /* simple node checks? */
-               hfsplus_set_perms(inode, &folder->permissions);
+               hfsplus_cat_set_perms(inode, &folder->permissions);
                folder->access_date = hfsp_ut2mt(inode->i_atime);
                folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
@@ -576,11 +577,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->data_fork);
-               if (S_ISREG(inode->i_mode))
-                       HFSPLUS_I(inode).dev = inode->i_nlink;
-               if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
-                       HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
-               hfsplus_set_perms(inode, &file->permissions);
+               hfsplus_cat_set_perms(inode, &file->permissions);
                if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
                        file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                else
index ac405f09902651838979e322931ba7a1f1441633..5b4667e08ef7789e49c274758a28d11ef86d5fde 100644 (file)
 #include <linux/mount.h>
 #include <linux/sched.h>
 #include <linux/xattr.h>
-#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include "hfsplus_fs.h"
 
-long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+       unsigned int flags = 0;
+
+       if (inode->i_flags & S_IMMUTABLE)
+               flags |= FS_IMMUTABLE_FL;
+       if (inode->i_flags & S_APPEND)
+               flags |= FS_APPEND_FL;
+       if (hip->userflags & HFSPLUS_FLG_NODUMP)
+               flags |= FS_NODUMP_FL;
+
+       return put_user(flags, user_flags);
+}
+
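+/*
+ * Update the ext2-compatible flags.  Only FS_IMMUTABLE_FL, FS_APPEND_FL
+ * and FS_NODUMP_FL are supported; changing the first two requires
+ * CAP_LINUX_IMMUTABLE.
+ */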
+static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int flags;
+       int err = 0;
 
-       lock_kernel();
-       switch (cmd) {
-       case HFSPLUS_IOC_EXT2_GETFLAGS:
-               flags = 0;
-               if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE)
-                       flags |= FS_IMMUTABLE_FL; /* EXT2_IMMUTABLE_FL */
-               if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND)
-                       flags |= FS_APPEND_FL; /* EXT2_APPEND_FL */
-               if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP)
-                       flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */
-               return put_user(flags, (int __user *)arg);
-       case HFSPLUS_IOC_EXT2_SETFLAGS: {
-               int err = 0;
-               err = mnt_want_write(filp->f_path.mnt);
-               if (err) {
-                       unlock_kernel();
-                       return err;
-               }
+       err = mnt_want_write(file->f_path.mnt);
+       if (err)
+               goto out;
 
-               if (!is_owner_or_cap(inode)) {
-                       err = -EACCES;
-                       goto setflags_out;
-               }
-               if (get_user(flags, (int __user *)arg)) {
-                       err = -EFAULT;
-                       goto setflags_out;
-               }
-               if (flags & (FS_IMMUTABLE_FL|FS_APPEND_FL) ||
-                   HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) {
-                       if (!capable(CAP_LINUX_IMMUTABLE)) {
-                               err = -EPERM;
-                               goto setflags_out;
-                       }
-               }
+       if (!is_owner_or_cap(inode)) {
+               err = -EACCES;
+               goto out_drop_write;
+       }
 
-               /* don't silently ignore unsupported ext2 flags */
-               if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
-                       err = -EOPNOTSUPP;
-                       goto setflags_out;
-               }
-               if (flags & FS_IMMUTABLE_FL) { /* EXT2_IMMUTABLE_FL */
-                       inode->i_flags |= S_IMMUTABLE;
-                       HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE;
-               } else {
-                       inode->i_flags &= ~S_IMMUTABLE;
-                       HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
-               }
-               if (flags & FS_APPEND_FL) { /* EXT2_APPEND_FL */
-                       inode->i_flags |= S_APPEND;
-                       HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND;
-               } else {
-                       inode->i_flags &= ~S_APPEND;
-                       HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND;
+       if (get_user(flags, user_flags)) {
+               err = -EFAULT;
+               goto out_drop_write;
+       }
+
+       mutex_lock(&inode->i_mutex);
+
+       if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) ||
+           inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
+               if (!capable(CAP_LINUX_IMMUTABLE)) {
+                       err = -EPERM;
+                       goto out_unlock_inode;
                }
-               if (flags & FS_NODUMP_FL) /* EXT2_NODUMP_FL */
-                       HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP;
-               else
-                       HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP;
-
-               inode->i_ctime = CURRENT_TIME_SEC;
-               mark_inode_dirty(inode);
-setflags_out:
-               mnt_drop_write(filp->f_path.mnt);
-               unlock_kernel();
-               return err;
        }
+
+       /* don't silently ignore unsupported ext2 flags */
+       if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
+               err = -EOPNOTSUPP;
+               goto out_unlock_inode;
+       }
+
+       if (flags & FS_IMMUTABLE_FL)
+               inode->i_flags |= S_IMMUTABLE;
+       else
+               inode->i_flags &= ~S_IMMUTABLE;
+
+       if (flags & FS_APPEND_FL)
+               inode->i_flags |= S_APPEND;
+       else
+               inode->i_flags &= ~S_APPEND;
+
+       if (flags & FS_NODUMP_FL)
+               hip->userflags |= HFSPLUS_FLG_NODUMP;
+       else
+               hip->userflags &= ~HFSPLUS_FLG_NODUMP;
+
+       inode->i_ctime = CURRENT_TIME_SEC;
+       mark_inode_dirty(inode);
+
+out_unlock_inode:
+       mutex_unlock(&inode->i_mutex);
+out_drop_write:
+       mnt_drop_write(file->f_path.mnt);
+out:
+       return err;
+}
+
+long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+
+       switch (cmd) {
+       case HFSPLUS_IOC_EXT2_GETFLAGS:
+               return hfsplus_ioctl_getflags(file, argp);
+       case HFSPLUS_IOC_EXT2_SETFLAGS:
+               return hfsplus_ioctl_setflags(file, argp);
        default:
-               unlock_kernel();
                return -ENOTTY;
        }
 }
@@ -110,7 +125,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
        if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
                return -EOPNOTSUPP;
 
-       res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+       res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
        if (res)
                return res;
        res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -153,7 +168,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
                return -EOPNOTSUPP;
 
        if (size) {
-               res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+               res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
                if (res)
                        return res;
                res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -177,7 +192,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
                } else
                        res = size ? -ERANGE : 4;
        } else
-               res = -ENODATA;
+               res = -EOPNOTSUPP;
 out:
        if (size)
                hfs_find_exit(&fd);
index 572628b4b07d23af08f98ca7b757c85acfbcbc0c..f9ab276a4d8de9e15d2acf49a358da1a9ff10fe6 100644 (file)
@@ -143,13 +143,13 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
                        kfree(p);
                        break;
                case opt_decompose:
-                       sbi->flags &= ~HFSPLUS_SB_NODECOMPOSE;
+                       clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
                        break;
                case opt_nodecompose:
-                       sbi->flags |= HFSPLUS_SB_NODECOMPOSE;
+                       set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
                        break;
                case opt_force:
-                       sbi->flags |= HFSPLUS_SB_FORCE;
+                       set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
                        break;
                default:
                        return 0;
@@ -171,7 +171,7 @@ done:
 
 int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
 {
-       struct hfsplus_sb_info *sbi = &HFSPLUS_SB(mnt->mnt_sb);
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);
 
        if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
                seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
@@ -184,7 +184,7 @@ int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
                seq_printf(seq, ",session=%u", sbi->session);
        if (sbi->nls)
                seq_printf(seq, ",nls=%s", sbi->nls->charset);
-       if (sbi->flags & HFSPLUS_SB_NODECOMPOSE)
+       if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags))
                seq_printf(seq, ",nodecompose");
        return 0;
 }
index 1528a6fd02992f1858ee254fe01039520da77bd4..208b16c645cc234c6f5ce1b15fbd5b49ba802ee8 100644 (file)
@@ -74,6 +74,7 @@ struct old_pmap {
 int hfs_part_find(struct super_block *sb,
                  sector_t *part_start, sector_t *part_size)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct buffer_head *bh;
        __be16 *data;
        int i, size, res;
@@ -95,7 +96,7 @@ int hfs_part_find(struct super_block *sb,
                for (i = 0; i < size; p++, i++) {
                        if (p->pdStart && p->pdSize &&
                            p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
-                           (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+                           (sbi->part < 0 || sbi->part == i)) {
                                *part_start += be32_to_cpu(p->pdStart);
                                *part_size = be32_to_cpu(p->pdSize);
                                res = 0;
@@ -111,7 +112,7 @@ int hfs_part_find(struct super_block *sb,
                size = be32_to_cpu(pm->pmMapBlkCnt);
                for (i = 0; i < size;) {
                        if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
-                           (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+                           (sbi->part < 0 || sbi->part == i)) {
                                *part_start += be32_to_cpu(pm->pmPyPartStart);
                                *part_size = be32_to_cpu(pm->pmPartBlkCnt);
                                res = 0;
index 3b55c050c74274710fa95cad827edf6abd6b8316..9a88d7536103e2c1c70f3824b42d367d3eac848f 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/vfs.h>
 #include <linux/nls.h>
 
@@ -21,40 +20,11 @@ static void hfsplus_destroy_inode(struct inode *inode);
 
 #include "hfsplus_fs.h"
 
-struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
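+/*
+ * Initialize a system (metadata) inode from its fork record in the
+ * volume header.
+ */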
+static int hfsplus_system_read_inode(struct inode *inode)
 {
-       struct hfs_find_data fd;
-       struct hfsplus_vh *vhdr;
-       struct inode *inode;
-       long err = -EIO;
-
-       inode = iget_locked(sb, ino);
-       if (!inode)
-               return ERR_PTR(-ENOMEM);
-       if (!(inode->i_state & I_NEW))
-               return inode;
+       struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;
 
-       INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
-       mutex_init(&HFSPLUS_I(inode).extents_lock);
-       HFSPLUS_I(inode).flags = 0;
-       HFSPLUS_I(inode).rsrc_inode = NULL;
-       atomic_set(&HFSPLUS_I(inode).opencnt, 0);
-
-       if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
-       read_inode:
-               hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
-               err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
-               if (!err)
-                       err = hfsplus_cat_read_inode(inode, &fd);
-               hfs_find_exit(&fd);
-               if (err)
-                       goto bad_inode;
-               goto done;
-       }
-       vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
-       switch(inode->i_ino) {
-       case HFSPLUS_ROOT_CNID:
-               goto read_inode;
+       switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                hfsplus_inode_read_fork(inode, &vhdr->ext_file);
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
@@ -75,74 +45,101 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
                inode->i_mapping->a_ops = &hfsplus_btree_aops;
                break;
        default:
-               goto bad_inode;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
+{
+       struct hfs_find_data fd;
+       struct inode *inode;
+       int err;
+
+       inode = iget_locked(sb, ino);
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
+       if (!(inode->i_state & I_NEW))
+               return inode;
+
+       INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+       mutex_init(&HFSPLUS_I(inode)->extents_lock);
+       HFSPLUS_I(inode)->flags = 0;
+       HFSPLUS_I(inode)->rsrc_inode = NULL;
+       atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+
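+       /*
+        * The root directory is stored in the catalog tree like user
+        * files; the other reserved CNIDs are system files described by
+        * the volume header itself.
+        */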
+       if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+           inode->i_ino == HFSPLUS_ROOT_CNID) {
+               hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+               err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+               if (!err)
+                       err = hfsplus_cat_read_inode(inode, &fd);
+               hfs_find_exit(&fd);
+       } else {
+               err = hfsplus_system_read_inode(inode);
+       }
+
+       if (err) {
+               iget_failed(inode);
+               return ERR_PTR(err);
        }
 
-done:
        unlock_new_inode(inode);
        return inode;
-
-bad_inode:
-       iget_failed(inode);
-       return ERR_PTR(err);
 }
 
-static int hfsplus_write_inode(struct inode *inode,
-               struct writeback_control *wbc)
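+/*
+ * Write the fork of a system (metadata) inode back into the in-core
+ * volume header, and note that the backup header needs updating if the
+ * size changed.
+ */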
+static int hfsplus_system_write_inode(struct inode *inode)
 {
-       struct hfsplus_vh *vhdr;
-       int ret = 0;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+       struct hfsplus_vh *vhdr = sbi->s_vhdr;
+       struct hfsplus_fork_raw *fork;
+       struct hfs_btree *tree = NULL;
 
-       dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
-       hfsplus_ext_write_extent(inode);
-       if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
-               return hfsplus_cat_write_inode(inode);
-       }
-       vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
        switch (inode->i_ino) {
-       case HFSPLUS_ROOT_CNID:
-               ret = hfsplus_cat_write_inode(inode);
-               break;
        case HFSPLUS_EXT_CNID:
-               if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) {
-                       HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-                       inode->i_sb->s_dirt = 1;
-               }
-               hfsplus_inode_write_fork(inode, &vhdr->ext_file);
-               hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree);
+               fork = &vhdr->ext_file;
+               tree = sbi->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
-               if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) {
-                       HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-                       inode->i_sb->s_dirt = 1;
-               }
-               hfsplus_inode_write_fork(inode, &vhdr->cat_file);
-               hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree);
+               fork = &vhdr->cat_file;
+               tree = sbi->cat_tree;
                break;
        case HFSPLUS_ALLOC_CNID:
-               if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) {
-                       HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-                       inode->i_sb->s_dirt = 1;
-               }
-               hfsplus_inode_write_fork(inode, &vhdr->alloc_file);
+               fork = &vhdr->alloc_file;
                break;
        case HFSPLUS_START_CNID:
-               if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) {
-                       HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-                       inode->i_sb->s_dirt = 1;
-               }
-               hfsplus_inode_write_fork(inode, &vhdr->start_file);
+               fork = &vhdr->start_file;
                break;
        case HFSPLUS_ATTR_CNID:
-               if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) {
-                       HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
-                       inode->i_sb->s_dirt = 1;
-               }
-               hfsplus_inode_write_fork(inode, &vhdr->attr_file);
-               hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree);
-               break;
+               fork = &vhdr->attr_file;
+               tree = sbi->attr_tree;
+               break;
+       default:
+               return -EIO;
+       }
+
+       if (fork->total_size != cpu_to_be64(inode->i_size)) {
+               set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
+               inode->i_sb->s_dirt = 1;
        }
-       return ret;
+       hfsplus_inode_write_fork(inode, fork);
+       if (tree)
+               hfs_btree_write(tree);
+       return 0;
+}
+
+static int hfsplus_write_inode(struct inode *inode,
+               struct writeback_control *wbc)
+{
+       dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+
+       hfsplus_ext_write_extent(inode);
+
+       if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+           inode->i_ino == HFSPLUS_ROOT_CNID)
+               return hfsplus_cat_write_inode(inode);
+       else
+               return hfsplus_system_write_inode(inode);
 }
 
 static void hfsplus_evict_inode(struct inode *inode)
@@ -151,51 +148,53 @@ static void hfsplus_evict_inode(struct inode *inode)
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
        if (HFSPLUS_IS_RSRC(inode)) {
-               HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
-               iput(HFSPLUS_I(inode).rsrc_inode);
+               HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
+               iput(HFSPLUS_I(inode)->rsrc_inode);
        }
 }
 
 int hfsplus_sync_fs(struct super_block *sb, int wait)
 {
-       struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+       struct hfsplus_vh *vhdr = sbi->s_vhdr;
 
        dprint(DBG_SUPER, "hfsplus_write_super\n");
 
-       lock_super(sb);
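+       /* vh_mutex guards the volume header, alloc_mutex the allocation data. */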
+       mutex_lock(&sbi->vh_mutex);
+       mutex_lock(&sbi->alloc_mutex);
        sb->s_dirt = 0;
 
-       vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks);
-       vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc);
-       vhdr->next_cnid = cpu_to_be32(HFSPLUS_SB(sb).next_cnid);
-       vhdr->folder_count = cpu_to_be32(HFSPLUS_SB(sb).folder_count);
-       vhdr->file_count = cpu_to_be32(HFSPLUS_SB(sb).file_count);
+       vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
+       vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
+       vhdr->folder_count = cpu_to_be32(sbi->folder_count);
+       vhdr->file_count = cpu_to_be32(sbi->file_count);
 
-       mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-       if (HFSPLUS_SB(sb).flags & HFSPLUS_SB_WRITEBACKUP) {
-               if (HFSPLUS_SB(sb).sect_count) {
+       mark_buffer_dirty(sbi->s_vhbh);
+       if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
+               if (sbi->sect_count) {
                        struct buffer_head *bh;
                        u32 block, offset;
 
-                       block = HFSPLUS_SB(sb).blockoffset;
-                       block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
-                       offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
-                       printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
-                               HFSPLUS_SB(sb).sect_count, block, offset);
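+                       /*
+                        * The backup volume header lives in the
+                        * second-to-last 512-byte sector of the partition.
+                        */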
+                       block = sbi->blockoffset;
+                       block += (sbi->sect_count - 2) >> (sb->s_blocksize_bits - 9);
+                       offset = ((sbi->sect_count - 2) << 9) & (sb->s_blocksize - 1);
+                       printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n",
+                                         sbi->blockoffset, sbi->sect_count,
+                                         block, offset);
                        bh = sb_bread(sb, block);
                        if (bh) {
                                vhdr = (struct hfsplus_vh *)(bh->b_data + offset);
                                if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) {
-                                       memcpy(vhdr, HFSPLUS_SB(sb).s_vhdr, sizeof(*vhdr));
+                                       memcpy(vhdr, sbi->s_vhdr, sizeof(*vhdr));
                                        mark_buffer_dirty(bh);
                                        brelse(bh);
                                } else
                                        printk(KERN_WARNING "hfs: backup not found!\n");
                        }
                }
-               HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
        }
-       unlock_super(sb);
+       mutex_unlock(&sbi->alloc_mutex);
+       mutex_unlock(&sbi->vh_mutex);
        return 0;
 }
 
@@ -209,48 +208,48 @@ static void hfsplus_write_super(struct super_block *sb)
 
 static void hfsplus_put_super(struct super_block *sb)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+
        dprint(DBG_SUPER, "hfsplus_put_super\n");
+
        if (!sb->s_fs_info)
                return;
 
-       lock_kernel();
-
        if (sb->s_dirt)
                hfsplus_write_super(sb);
-       if (!(sb->s_flags & MS_RDONLY) && HFSPLUS_SB(sb).s_vhdr) {
-               struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+       if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
+               struct hfsplus_vh *vhdr = sbi->s_vhdr;
 
                vhdr->modify_date = hfsp_now2mt();
                vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
                vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
-               mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-               sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+               mark_buffer_dirty(sbi->s_vhbh);
+               sync_dirty_buffer(sbi->s_vhbh);
        }
 
-       hfs_btree_close(HFSPLUS_SB(sb).cat_tree);
-       hfs_btree_close(HFSPLUS_SB(sb).ext_tree);
-       iput(HFSPLUS_SB(sb).alloc_file);
-       iput(HFSPLUS_SB(sb).hidden_dir);
-       brelse(HFSPLUS_SB(sb).s_vhbh);
-       unload_nls(HFSPLUS_SB(sb).nls);
+       hfs_btree_close(sbi->cat_tree);
+       hfs_btree_close(sbi->ext_tree);
+       iput(sbi->alloc_file);
+       iput(sbi->hidden_dir);
+       brelse(sbi->s_vhbh);
+       unload_nls(sbi->nls);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
-
-       unlock_kernel();
 }
 
 static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
        buf->f_type = HFSPLUS_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
-       buf->f_bfree = HFSPLUS_SB(sb).free_blocks << HFSPLUS_SB(sb).fs_shift;
+       buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
+       buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
        buf->f_bavail = buf->f_bfree;
        buf->f_files = 0xFFFFFFFF;
-       buf->f_ffree = 0xFFFFFFFF - HFSPLUS_SB(sb).next_cnid;
+       buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
        buf->f_namelen = HFSPLUS_MAX_STRLEN;
@@ -263,11 +262,11 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
        if (!(*flags & MS_RDONLY)) {
-               struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+               struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                struct hfsplus_sb_info sbi;
 
                memset(&sbi, 0, sizeof(struct hfsplus_sb_info));
-               sbi.nls = HFSPLUS_SB(sb).nls;
+               sbi.nls = HFSPLUS_SB(sb)->nls;
                if (!hfsplus_parse_options(data, &sbi))
                        return -EINVAL;
 
@@ -276,7 +275,7 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
                               "running fsck.hfsplus is recommended.  leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
-               } else if (sbi.flags & HFSPLUS_SB_FORCE) {
+               } else if (test_bit(HFSPLUS_SB_FORCE, &sbi.flags)) {
                        /* nothing */
                } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
@@ -320,7 +319,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                return -ENOMEM;
 
        sb->s_fs_info = sbi;
-       INIT_HLIST_HEAD(&sbi->rsrc_inodes);
+       mutex_init(&sbi->alloc_mutex);
+       mutex_init(&sbi->vh_mutex);
        hfsplus_fill_defaults(sbi);
        if (!hfsplus_parse_options(data, sbi)) {
                printk(KERN_ERR "hfs: unable to parse mount options\n");
@@ -344,7 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                err = -EINVAL;
                goto cleanup;
        }
-       vhdr = HFSPLUS_SB(sb).s_vhdr;
+       vhdr = sbi->s_vhdr;
 
        /* Copy parts of the volume header into the superblock */
        sb->s_magic = HFSPLUS_VOLHEAD_SIG;
@@ -353,18 +353,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                printk(KERN_ERR "hfs: wrong filesystem version\n");
                goto cleanup;
        }
-       HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
-       HFSPLUS_SB(sb).free_blocks = be32_to_cpu(vhdr->free_blocks);
-       HFSPLUS_SB(sb).next_alloc = be32_to_cpu(vhdr->next_alloc);
-       HFSPLUS_SB(sb).next_cnid = be32_to_cpu(vhdr->next_cnid);
-       HFSPLUS_SB(sb).file_count = be32_to_cpu(vhdr->file_count);
-       HFSPLUS_SB(sb).folder_count = be32_to_cpu(vhdr->folder_count);
-       HFSPLUS_SB(sb).data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-       if (!HFSPLUS_SB(sb).data_clump_blocks)
-               HFSPLUS_SB(sb).data_clump_blocks = 1;
-       HFSPLUS_SB(sb).rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
-       if (!HFSPLUS_SB(sb).rsrc_clump_blocks)
-               HFSPLUS_SB(sb).rsrc_clump_blocks = 1;
+       sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
+       sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
+       sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
+       sbi->file_count = be32_to_cpu(vhdr->file_count);
+       sbi->folder_count = be32_to_cpu(vhdr->folder_count);
+       sbi->data_clump_blocks =
+               be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
+       if (!sbi->data_clump_blocks)
+               sbi->data_clump_blocks = 1;
+       sbi->rsrc_clump_blocks =
+               be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
+       if (!sbi->rsrc_clump_blocks)
+               sbi->rsrc_clump_blocks = 1;
 
        /* Set up operations so we can load metadata */
        sb->s_op = &hfsplus_sops;
@@ -374,7 +375,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
                       "running fsck.hfsplus is recommended.  mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
-       } else if (sbi->flags & HFSPLUS_SB_FORCE) {
+       } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
@@ -384,16 +385,15 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                       "use the force option at your own risk, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }
-       sbi->flags &= ~HFSPLUS_SB_FORCE;
 
        /* Load metadata objects (B*Trees) */
-       HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
-       if (!HFSPLUS_SB(sb).ext_tree) {
+       sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
+       if (!sbi->ext_tree) {
                printk(KERN_ERR "hfs: failed to load extents file\n");
                goto cleanup;
        }
-       HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
-       if (!HFSPLUS_SB(sb).cat_tree) {
+       sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
+       if (!sbi->cat_tree) {
                printk(KERN_ERR "hfs: failed to load catalog file\n");
                goto cleanup;
        }
@@ -404,7 +404,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                err = PTR_ERR(inode);
                goto cleanup;
        }
-       HFSPLUS_SB(sb).alloc_file = inode;
+       sbi->alloc_file = inode;
 
        /* Load the root directory */
        root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
@@ -423,7 +423,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
        str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
        str.name = HFSP_HIDDENDIR_NAME;
-       hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+       hfs_find_init(sbi->cat_tree, &fd);
        hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
        if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
                hfs_find_exit(&fd);
@@ -434,7 +434,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                        err = PTR_ERR(inode);
                        goto cleanup;
                }
-               HFSPLUS_SB(sb).hidden_dir = inode;
+               sbi->hidden_dir = inode;
        } else
                hfs_find_exit(&fd);
 
@@ -449,15 +449,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        be32_add_cpu(&vhdr->write_count, 1);
        vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
        vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-       mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
-       sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+       mark_buffer_dirty(sbi->s_vhbh);
+       sync_dirty_buffer(sbi->s_vhbh);
 
-       if (!HFSPLUS_SB(sb).hidden_dir) {
+       if (!sbi->hidden_dir) {
                printk(KERN_DEBUG "hfs: create hidden dir...\n");
-               HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
-               hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
-                                  &str, HFSPLUS_SB(sb).hidden_dir);
-               mark_inode_dirty(HFSPLUS_SB(sb).hidden_dir);
+
+               mutex_lock(&sbi->vh_mutex);
+               sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+               hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
+                                  &str, sbi->hidden_dir);
+               mutex_unlock(&sbi->vh_mutex);
+
+               mark_inode_dirty(sbi->hidden_dir);
        }
 out:
        unload_nls(sbi->nls);
@@ -486,7 +490,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 
 static void hfsplus_destroy_inode(struct inode *inode)
 {
-       kmem_cache_free(hfsplus_inode_cachep, &HFSPLUS_I(inode));
+       kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
 }
 
 #define HFSPLUS_INODE_SIZE     sizeof(struct hfsplus_inode_info)
index 628ccf6fa402500aa15d7d53969b0f62b6ea5188..b66d67de882c3d098d661f54cbc2b19983bab32b 100644 (file)
@@ -121,7 +121,7 @@ static u16 *hfsplus_compose_lookup(u16 *p, u16 cc)
 int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
 {
        const hfsplus_unichr *ip;
-       struct nls_table *nls = HFSPLUS_SB(sb).nls;
+       struct nls_table *nls = HFSPLUS_SB(sb)->nls;
        u8 *op;
        u16 cc, c0, c1;
        u16 *ce1, *ce2;
@@ -132,7 +132,7 @@ int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, c
        ustrlen = be16_to_cpu(ustr->length);
        len = *len_p;
        ce1 = NULL;
-       compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+       compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
 
        while (ustrlen > 0) {
                c0 = be16_to_cpu(*ip++);
@@ -246,7 +246,7 @@ out:
 static inline int asc2unichar(struct super_block *sb, const char *astr, int len,
                              wchar_t *uc)
 {
-       int size = HFSPLUS_SB(sb).nls->char2uni(astr, len, uc);
+       int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc);
        if (size <= 0) {
                *uc = '?';
                size = 1;
@@ -293,7 +293,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
        u16 *dstr, outlen = 0;
        wchar_t c;
 
-       decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+       decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
        while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
                size = asc2unichar(sb, astr, len, &c);
 
@@ -330,8 +330,8 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
        wchar_t c;
        u16 c2;
 
-       casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
-       decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+       casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+       decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
        hash = init_name_hash();
        astr = str->name;
        len = str->len;
@@ -373,8 +373,8 @@ int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *
        u16 c1, c2;
        wchar_t c;
 
-       casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
-       decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+       casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+       decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
        astr1 = s1->name;
        len1 = s1->len;
        astr2 = s2->name;
index bed78ac8f6d1f5e530a93c0afac22e1cfdf2178a..8972c20b3216941a88eb3deffa591d89ea3c114a 100644 (file)
@@ -65,8 +65,8 @@ static int hfsplus_get_last_session(struct super_block *sb,
        *start = 0;
        *size = sb->s_bdev->bd_inode->i_size >> 9;
 
-       if (HFSPLUS_SB(sb).session >= 0) {
-               te.cdte_track = HFSPLUS_SB(sb).session;
+       if (HFSPLUS_SB(sb)->session >= 0) {
+               te.cdte_track = HFSPLUS_SB(sb)->session;
                te.cdte_format = CDROM_LBA;
                res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
                if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
@@ -87,6 +87,7 @@ static int hfsplus_get_last_session(struct super_block *sb,
 /* Takes in super block, returns true if good data read */
 int hfsplus_read_wrapper(struct super_block *sb)
 {
+       struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct buffer_head *bh;
        struct hfsplus_vh *vhdr;
        struct hfsplus_wd wd;
@@ -122,7 +123,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
                if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
                        break;
                if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
-                       HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX;
+                       set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
                        break;
                }
                brelse(bh);
@@ -143,11 +144,11 @@ int hfsplus_read_wrapper(struct super_block *sb)
        if (blocksize < HFSPLUS_SECTOR_SIZE ||
            ((blocksize - 1) & blocksize))
                return -EINVAL;
-       HFSPLUS_SB(sb).alloc_blksz = blocksize;
-       HFSPLUS_SB(sb).alloc_blksz_shift = 0;
+       sbi->alloc_blksz = blocksize;
+       sbi->alloc_blksz_shift = 0;
        while ((blocksize >>= 1) != 0)
-               HFSPLUS_SB(sb).alloc_blksz_shift++;
-       blocksize = min(HFSPLUS_SB(sb).alloc_blksz, (u32)PAGE_SIZE);
+               sbi->alloc_blksz_shift++;
+       blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);
 
        /* align block size to block offset */
        while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
@@ -158,23 +159,26 @@ int hfsplus_read_wrapper(struct super_block *sb)
                return -EINVAL;
        }
 
-       HFSPLUS_SB(sb).blockoffset = part_start >>
-                       (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
-       HFSPLUS_SB(sb).sect_count = part_size;
-       HFSPLUS_SB(sb).fs_shift = HFSPLUS_SB(sb).alloc_blksz_shift -
-                       sb->s_blocksize_bits;
+       sbi->blockoffset =
+               part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
+       sbi->sect_count = part_size;
+       sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
 
        bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
        if (!bh)
                return -EIO;
 
        /* should still be the same... */
-       if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ?
-                               cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) :
-                               cpu_to_be16(HFSPLUS_VOLHEAD_SIG)))
-               goto error;
-       HFSPLUS_SB(sb).s_vhbh = bh;
-       HFSPLUS_SB(sb).s_vhdr = vhdr;
+       if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+               if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
+                       goto error;
+       } else {
+               if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
+                       goto error;
+       }
+
+       sbi->s_vhbh = bh;
+       sbi->s_vhdr = vhdr;
 
        return 0;
  error:
index e20ee85955d1c77c3a410da2c82893cd38acce8e..f3f3578393a417085812ba1ad7e0b7c1e4e5c981 100644 (file)
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 
        inode_inc_link_count(dir);
 
-       inode = minix_new_inode(dir, mode, &err);
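+       /* The mode passed in lacks the file type bits; mark it a directory. */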
+       inode = minix_new_inode(dir, S_IFDIR | mode, &err);
        if (!inode)
                goto out_dir;
 
index 6c2aad49d7318054b57c22b58c6b4eaa0be50178..f7e13db613cbce9a3014b826c9d00414169f4de8 100644 (file)
@@ -63,6 +63,7 @@ config NFS_V3_ACL
 config NFS_V4
        bool "NFS client support for NFS version 4"
        depends on NFS_FS
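+       # NFSv4 mandates RPCSEC_GSS (RFC 3530), hence the new select.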
+       select SUNRPC_GSS
        help
          This option enables support for version 4 of the NFS protocol
          (RFC 3530) in the kernel's NFS client.
index 4e7df2adb2125724a4ea9ade0fc767606d54c4b3..e7340729af896e2bce1097fe4f8521618680173e 100644 (file)
@@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
            sin1->sin6_scope_id != sin2->sin6_scope_id)
                return 0;
 
-       return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+       return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
 }
 #else  /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
 static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
index eb51bd6201da0cd361d8265b4c6b0e3edee50ec9..05bf3c0dc751d5d489b1c7be2cf594005caf1695 100644 (file)
@@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
                default:
                        BUG();
        }
-       if (res < 0)
-               dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
-                       " - error %d!\n",
-                               __func__, res);
        return res;
 }
 
index ec3966e4706b2f70d8709199f86456392a6e6584..f4cbf0c306c64d6583cbfffaa14cfbc7a467dfaa 100644 (file)
@@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
                goto out_err;
 
        error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
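+       /*
+        * A stale filehandle most likely means our cached view of the
+        * export is out of date; zap the parent directory's caches so it
+        * is revalidated on the next access.
+        */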
+       if (unlikely(error == -ESTALE)) {
+               struct dentry *pd_dentry;
 
+               pd_dentry = dget_parent(dentry);
+               if (pd_dentry != NULL) {
+                       nfs_zap_caches(pd_dentry->d_inode);
+                       dput(pd_dentry);
+               }
+       }
        nfs_free_fattr(res.fattr);
        if (error < 0)
                goto out_err;
index 95932f523aef2b2b7ef4686b8bb4a18383b4a395..4264377552e207f57550b9e72d53776e0689ca81 100644 (file)
@@ -69,6 +69,7 @@ config NFSD_V4
        depends on NFSD && PROC_FS && EXPERIMENTAL
        select NFSD_V3
        select FS_POSIX_ACL
+       select SUNRPC_GSS
        help
          This option enables support in your system's NFS server for
          version 4 of the NFS protocol (RFC 3530).
index 3dfef062396845d2b45cc42a22064ec4402ee05f..cf0d2ffb3c84a149bc904323cd53599620c8c917 100644 (file)
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-       switch (access) {
+       switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
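With the RFC 3530 constants (NFS4_SHARE_ACCESS_READ = 1, NFS4_SHARE_ACCESS_WRITE = 2, NFS4_SHARE_ACCESS_BOTH = 3), masking first means a share-access word carrying any extra bits still resolves to a valid open mode instead of falling past every case (the function otherwise ends in BUG()). A standalone sketch of the arithmetic, with the constants inlined for illustration:

    #include <stdio.h>

    #define NFS4_SHARE_ACCESS_READ  0x0001
    #define NFS4_SHARE_ACCESS_WRITE 0x0002
    #define NFS4_SHARE_ACCESS_BOTH  0x0003

    int main(void)
    {
            unsigned int access = NFS4_SHARE_ACCESS_READ | 0x0100; /* stray bit */

            /* 0x0101 matches no case; 0x0101 & 0x0003 == 0x0001 does */
            switch (access & NFS4_SHARE_ACCESS_BOTH) {
            case NFS4_SHARE_ACCESS_READ:
                    printf("O_RDONLY\n");
                    break;
            case NFS4_SHARE_ACCESS_WRITE:
                    printf("O_WRONLY\n");
                    break;
            case NFS4_SHARE_ACCESS_BOTH:
                    printf("O_RDWR\n");
                    break;
            }
            return 0;
    }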
index cdfb8c6a420674cbde75467a09d6a158977a4b61..c16f8d8331b5afcd80baec98cbdbc4594c66142d 100644 (file)
@@ -196,8 +196,6 @@ fh_lock(struct svc_fh *fhp)
 static inline void
 fh_unlock(struct svc_fh *fhp)
 {
-       BUG_ON(!fhp->fh_dentry);
-
        if (fhp->fh_locked) {
                fill_post_wcc(fhp);
                mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
index 22c629eedd82d70425704ee86b4ddf816b7bd174..b388443c3a09b01574b78befaabbf7fe03f9eeb4 100644 (file)
@@ -3,4 +3,4 @@ config FSNOTIFY
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
-source "fs/notify/fanotify/Kconfig"
+#source "fs/notify/fanotify/Kconfig"
index a76e0aa5cd3fc5188a392639618858a7e7e34092..391915093fe1c494a58ec8feae54cd576fd2464a 100644 (file)
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
        }
 
        inode->i_mode = new_mode;
+       inode->i_ctime = CURRENT_TIME;
        di->i_mode = cpu_to_le16(inode->i_mode);
+       di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+       di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 
        ocfs2_journal_dirty(handle, di_bh);
 
index 215e12ce1d85e2079359838cf287f1c3c670ff31..592fae5007d1245baade87453ce731121aa6efe5 100644 (file)
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
        last_page_bytes = PAGE_ALIGN(end);
        index = start >> PAGE_CACHE_SHIFT;
        do {
-               pages[numpages] = grab_cache_page(mapping, index);
+               pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
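grab_cache_page() allocates new pages with the mapping's default gfp mask, which normally includes __GFP_FS and can therefore recurse into filesystem reclaim while journal or cluster locks are held; find_or_create_page() takes an explicit mask, so these call sites can insist on GFP_NOFS. The substitution, shown as a hypothetical wrapper (the helper name is illustrative, not a kernel symbol):

    #include <linux/pagemap.h>

    /* same lookup semantics as grab_cache_page(), but reclaim for the
     * allocation may not re-enter the filesystem */
    static struct page *grab_cache_page_nofs(struct address_space *mapping,
                                             pgoff_t index)
    {
            return find_or_create_page(mapping, index, GFP_NOFS);
    }

The same grab_cache_page() -> find_or_create_page(..., GFP_NOFS) conversion recurs in file.c and refcounttree.c further down.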
index ec6d123395932b69b6a67ba0b5256be02b811c6f..c7ee03c22226253d970cce94beb11f6353b3e1d0 100644 (file)
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
-            "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
+            "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                goto out;
        }
 
-       mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+       mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
index 1361997cf205d132a04ddeb87760aa233c3e707d..cbe2f057cc2826cb6af4ed833319fe27c4bd565f 100644 (file)
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
                           size_t caller_veclen, u8 target_node, int *status)
 {
-       int ret;
+       int ret = 0;
        struct o2net_msg *msg = NULL;
        size_t veclen, caller_bytes = 0;
        struct kvec *vec = NULL;
index f04ebcfffc4a5e1516c2a7307ffa8032db8e19d7..c49f6de0e7abb6e096ddc56e795957e0194bb8dd 100644 (file)
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                goto out_commit;
        }
 
+       cpos = split_hash;
+       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
+                                      data_ac, meta_ac, new_dx_leaves,
+                                      num_dx_leaves);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
        for (i = 0; i < num_dx_leaves; i++) {
                ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
                                              orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                        mlog_errno(ret);
                        goto out_commit;
                }
-       }
 
-       cpos = split_hash;
-       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
-                                      data_ac, meta_ac, new_dx_leaves,
-                                      num_dx_leaves);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
+               ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+                                             new_dx_leaves[i],
+                                             OCFS2_JOURNAL_ACCESS_WRITE);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out_commit;
+               }
        }
 
        ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
index 4b6ae2c13b47a85c6a31f6f7d2a1072099354935..765298908f1d3905bbabf3c5e597a8bf72ebd4a9 100644 (file)
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
index 5efdd37dfe484f2f6207ac2c7c6927fda0cc1ea0..901ca52bf86b293639ea39c8e1f460b66bc1f265 100644 (file)
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        spin_lock(&dlm->track_lock);
        if (oldres)
                track_list = &oldres->tracking;
-       else
+       else {
                track_list = &dlm->tracking_list;
+               if (list_empty(track_list)) {
+                       dl = NULL;
+                       spin_unlock(&dlm->track_lock);
+                       goto bail;
+               }
+       }
 
        list_for_each_entry(res, track_list, tracking) {
                if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        } else
                dl = NULL;
 
+bail:
        /* passed to seq_show */
        return dl;
 }
index 153abb5abef024d2ca63f6d4a23ee1c126b89f13..11a5c87fd7f7c00de41c61c00fae625e46675973 100644 (file)
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
+               dlm_force_free_mles(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
        dlm_put(dlm);
index ffb4c68dafa495bc739165eb30f718f1eb0865ec..f564b0e5f80d8c89e08eeba4a5e133b24cb67373 100644 (file)
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
        wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+       int i;
+       struct hlist_head *bucket;
+       struct dlm_master_list_entry *mle;
+       struct hlist_node *tmp, *list;
+
+       /*
+        * We notified all other nodes that we are exiting the domain and
+        * marked the dlm state as DLM_CTXT_LEAVING. If any mles are still
+        * around, we force free them and wake any processes that are waiting
+        * on the mles.
+        */
+       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->master_lock);
+
+       BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+       BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+       for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+               bucket = dlm_master_hash(dlm, i);
+               hlist_for_each_safe(list, tmp, bucket) {
+                       mle = hlist_entry(list, struct dlm_master_list_entry,
+                                         master_hash_node);
+                       if (mle->type != DLM_MLE_BLOCK) {
+                               mlog(ML_ERROR, "bad mle: %p\n", mle);
+                               dlm_print_one_mle(mle);
+                       }
+                       atomic_set(&mle->woken, 1);
+                       wake_up(&mle->wq);
+
+                       __dlm_unlink_mle(dlm, mle);
+                       __dlm_mle_detach_hb_events(dlm, mle);
+                       __dlm_put_mle(mle);
+               }
+       }
+       spin_unlock(&dlm->master_lock);
+       spin_unlock(&dlm->spinlock);
+}
index d1ce48e1b3d6029e5861cf2863cbb0a544511b40..1d596d8c4a4a55dfd185ecaff4d7d9900bf10b24 100644 (file)
@@ -84,6 +84,7 @@ enum {
        OI_LS_PARENT,
        OI_LS_RENAME1,
        OI_LS_RENAME2,
+       OI_LS_REFLINK_TARGET,
 };
 
 int ocfs2_dlm_init(struct ocfs2_super *osb);
index 81296b4e364632dd5936f59d8adeab9832f2d2fd..9a03c151b5ceabc169215d99777abb92e2d2ad36 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
        if (err)
                goto bail;
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+               /*
+                * We still have to flush the drive's caches to get data to
+                * the platter.
+                */
+               if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+                                          NULL, BLKDEV_IFL_WAIT);
                goto bail;
+       }
 
        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
        if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && has_refcount)) {
+           ((file->f_flags & O_DIRECT) && !direct_io)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
index 0492464916b19324e73425e29c473956b0b4bd33..eece3e05d9d0124d04b81c940c1289f876e700bb 100644 (file)
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               if (!status)
+               /*
+                * If the buffer is in jbd, its checksum may not have been
+                * computed yet.
+                */
+               if (!status && !buffer_jbd(bh))
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
index af2b8fe1f13999e26f6e2543a047847bcf517c4e..4c18f4ad93b43cae6e5ddcd5a2781fbc79292484 100644 (file)
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
        /*
         * Another node might have truncated while we were waiting on
         * cluster locks.
+        * We don't check size == 0 before the shift. This is borrowed
+        * from do_generic_file_read.
         */
-       last_index = size >> PAGE_CACHE_SHIFT;
-       if (page->index > last_index) {
+       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(!size || page->index > last_index)) {
                ret = -EINVAL;
                goto out;
        }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = size & ~PAGE_CACHE_MASK;
+               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
                                       &fsdata, di_bh, page);
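Worked through with an assumed 4K page size, the two hunks differ exactly at a page boundary: for size == 4096 the old expressions gave last_index = 1 (a page past EOF) and len = 0, while the new ones give last_index = 0 and len = 4096, with the added !size test covering the one case the subtraction cannot. A userspace check of the arithmetic, PAGE_CACHE_* collapsed to plain 4K constants:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = PAGE_SIZE; /* file exactly one page long */

            printf("old: last_index=%lu len=%lu\n",
                   size >> PAGE_SHIFT, size & ~PAGE_MASK);
            printf("new: last_index=%lu len=%lu\n",
                   (size - 1) >> PAGE_SHIFT, ((size - 1) & ~PAGE_MASK) + 1);
            return 0;
    }

This prints "old: last_index=1 len=0" and "new: last_index=0 len=4096".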
index f171b51a74f78d6e268b5d743a24df4e702f9643..a00dda2e4f16698e5c7d8e0946ba1f09506651c6 100644 (file)
@@ -472,32 +472,23 @@ leave:
        return status;
 }
 
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
-                             struct inode *dir,
-                             struct inode *inode,
-                             dev_t dev,
-                             struct buffer_head **new_fe_bh,
-                             struct buffer_head *parent_fe_bh,
-                             handle_t *handle,
-                             struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+                               struct inode *inode,
+                               dev_t dev,
+                               struct buffer_head **new_fe_bh,
+                               struct buffer_head *parent_fe_bh,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *inode_ac,
+                               u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
 {
        int status = 0;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_extent_list *fel;
-       u64 suballoc_loc, fe_blkno = 0;
-       u16 suballoc_bit;
        u16 feat;
 
        *new_fe_bh = NULL;
 
-       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
-                                      inode_ac, &suballoc_loc,
-                                      &suballoc_bit, &fe_blkno);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        /* populate as many fields early on as possible - many of
         * these are used by the support functions here and in
         * callers. */
@@ -591,6 +582,34 @@ leave:
        return status;
 }
 
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+                             struct inode *dir,
+                             struct inode *inode,
+                             dev_t dev,
+                             struct buffer_head **new_fe_bh,
+                             struct buffer_head *parent_fe_bh,
+                             handle_t *handle,
+                             struct ocfs2_alloc_context *inode_ac)
+{
+       int status = 0;
+       u64 suballoc_loc, fe_blkno = 0;
+       u16 suballoc_bit;
+
+       *new_fe_bh = NULL;
+
+       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+                                      inode_ac, &suballoc_loc,
+                                      &suballoc_bit, &fe_blkno);
+       if (status < 0) {
+               mlog_errno(status);
+               return status;
+       }
+
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+                                   parent_fe_bh, handle, inode_ac,
+                                   fe_blkno, suballoc_loc, suballoc_bit);
+}
+
 static int ocfs2_mkdir(struct inode *dir,
                       struct dentry *dentry,
                       int mode)
@@ -1852,61 +1871,117 @@ bail:
        return status;
 }
 
-static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct inode **ret_orphan_dir,
-                                   u64 blkno,
-                                   char *name,
-                                   struct ocfs2_dir_lookup_result *lookup)
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+                                       struct inode **ret_orphan_dir,
+                                       struct buffer_head **ret_orphan_dir_bh)
 {
        struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
-       int status = 0;
-
-       status = ocfs2_blkno_stringify(blkno, name);
-       if (status < 0) {
-               mlog_errno(status);
-               return status;
-       }
+       int ret = 0;
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       osb->slot_num);
        if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               return status;
+               ret = -ENOENT;
+               mlog_errno(ret);
+               return ret;
        }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
 
-       status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
+       ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+       if (ret < 0) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
+               iput(orphan_dir_inode);
+
+               mlog_errno(ret);
+               return ret;
        }
 
-       status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
-                                             orphan_dir_bh, name,
-                                             OCFS2_ORPHAN_NAMELEN, lookup);
-       if (status < 0) {
-               ocfs2_inode_unlock(orphan_dir_inode, 1);
+       *ret_orphan_dir = orphan_dir_inode;
+       *ret_orphan_dir_bh = orphan_dir_bh;
 
-               mlog_errno(status);
-               goto leave;
+       return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+                                     struct buffer_head *orphan_dir_bh,
+                                     u64 blkno,
+                                     char *name,
+                                     struct ocfs2_dir_lookup_result *lookup)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+       ret = ocfs2_blkno_stringify(blkno, name);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+                                          orphan_dir_bh, name,
+                                          OCFS2_ORPHAN_NAMELEN, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ *          ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+                                   struct inode **ret_orphan_dir,
+                                   u64 blkno,
+                                   char *name,
+                                   struct ocfs2_dir_lookup_result *lookup)
+{
+       struct inode *orphan_dir_inode = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       int ret = 0;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+                                          &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+                                        blkno, name, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
        }
 
        *ret_orphan_dir = orphan_dir_inode;
 
-leave:
-       if (status) {
+out:
+       brelse(orphan_dir_bh);
+
+       if (ret) {
+               ocfs2_inode_unlock(orphan_dir_inode, 1);
                mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
        }
 
-       brelse(orphan_dir_bh);
-
-       mlog_exit(status);
-       return status;
+       mlog_exit(ret);
+       return ret;
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
        return status;
 }
 
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken and egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only when necessary.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head of the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+                                       struct buffer_head *dir_bh,
+                                       char *orphan_name,
+                                       struct inode **ret_orphan_dir,
+                                       u64 *ret_di_blkno,
+                                       struct ocfs2_dir_lookup_result *orphan_insert,
+                                       struct ocfs2_alloc_context **ret_inode_ac)
+{
+       int ret;
+       u64 di_blkno;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+       struct inode *orphan_dir = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       struct ocfs2_alloc_context *inode_ac = NULL;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /* reserve an inode spot */
+       ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+       if (ret < 0) {
+               if (ret != -ENOSPC)
+                       mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+                                      &di_blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+                                        di_blkno, orphan_name, orphan_insert);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+out:
+       if (ret == 0) {
+               *ret_orphan_dir = orphan_dir;
+               *ret_di_blkno = di_blkno;
+               *ret_inode_ac = inode_ac;
+               /*
+                * orphan_name and orphan_insert are already up to
+                * date via prepare_orphan_dir
+                */
+       } else {
+               /* Unroll reserve_new_inode* */
+               if (inode_ac)
+                       ocfs2_free_alloc_context(inode_ac);
+
+               /* Unroll orphan dir locking */
+               mutex_unlock(&orphan_dir->i_mutex);
+               ocfs2_inode_unlock(orphan_dir, 1);
+               iput(orphan_dir);
+       }
+
+       brelse(orphan_dir_bh);
+
+       return ret;
+}
+
 int ocfs2_create_inode_in_orphan(struct inode *dir,
                                 int mode,
                                 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct buffer_head *new_di_bh = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+       u64 uninitialized_var(di_blkno), suballoc_loc;
+       u16 suballoc_bit;
 
        status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
        if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                return status;
        }
 
-       /*
-        * We give the orphan dir the root blkno to fake an orphan name,
-        * and allocate enough space for our insertion.
-        */
-       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
-                                         osb->root_blkno,
-                                         orphan_name, &orphan_insert);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
-       /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, &inode_ac);
+       status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+                                             orphan_name, &orphan_dir,
+                                             &di_blkno, &orphan_insert, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        did_quota_inode = 1;
 
-       inode->i_nlink = 0;
-       /* do the real work now. */
-       status = ocfs2_mknod_locked(osb, dir, inode,
-                                   0, &new_di_bh, parent_di_bh, handle,
-                                   inode_ac);
+       status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+                                             &suballoc_loc,
+                                             &suballoc_bit, di_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
 
-       status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+       inode->i_nlink = 0;
+       /* do the real work now. */
+       status = __ocfs2_mknod_locked(dir, inode,
+                                     0, &new_di_bh, parent_di_bh, handle,
+                                     inode_ac, di_blkno, suballoc_loc,
+                                     suballoc_bit);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
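Taken together, these namei.c changes mean the new inode's block number is known before the inode is allocated, so the orphan dirent no longer has to be faked from osb->root_blkno. Roughly, the resulting sequence inside ocfs2_create_inode_in_orphan(), with error handling and quota setup elided (a condensed sketch, not verbatim code):

            /* 1. find, but do not claim, the block the inode will occupy */
            ocfs2_prep_new_orphaned_file(dir, parent_di_bh, orphan_name,
                                         &orphan_dir, &di_blkno,
                                         &orphan_insert, &inode_ac);

            /* 2. the orphan name was stringified from di_blkno above,
             *    before the inode existed */

            /* 3. claim exactly the block found in step 1 ... */
            ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
                                         &suballoc_loc, &suballoc_bit,
                                         di_blkno);

            /* 4. ... and initialize the inode there */
            __ocfs2_mknod_locked(dir, inode, 0, &new_di_bh, parent_di_bh,
                                 handle, inode_ac, di_blkno,
                                 suballoc_loc, suballoc_bit);

The suballoc.c half of this two-phase allocation (ac_find_loc_only and ac_find_loc_priv) appears below.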
index 33f1c9a8258d1d4de5a17fe1ebf1d0d39c4098c3..fa31d05e41b7e558fa24df43682dbd107b568c76 100644 (file)
 #define OCFS2_HAS_REFCOUNT_FL   (0x0010)
 
 /* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL         (0x00000001)    /* Secure deletion */
-#define OCFS2_UNRM_FL          (0x00000002)    /* Undelete */
-#define OCFS2_COMPR_FL         (0x00000004)    /* Compress file */
-#define OCFS2_SYNC_FL          (0x00000008)    /* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL     (0x00000010)    /* Immutable file */
-#define OCFS2_APPEND_FL                (0x00000020)    /* writes to file may only append */
-#define OCFS2_NODUMP_FL                (0x00000040)    /* do not dump file */
-#define OCFS2_NOATIME_FL       (0x00000080)    /* do not update atime */
-#define OCFS2_DIRSYNC_FL       (0x00010000)    /* dirsync behaviour (directories only) */
-
-#define OCFS2_FL_VISIBLE       (0x000100FF)    /* User visible flags */
-#define OCFS2_FL_MODIFIABLE    (0x000100FF)    /* User modifiable flags */
+#define OCFS2_SECRM_FL                 FS_SECRM_FL     /* Secure deletion */
+#define OCFS2_UNRM_FL                  FS_UNRM_FL      /* Undelete */
+#define OCFS2_COMPR_FL                 FS_COMPR_FL     /* Compress file */
+#define OCFS2_SYNC_FL                  FS_SYNC_FL      /* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL             FS_IMMUTABLE_FL /* Immutable file */
+#define OCFS2_APPEND_FL                        FS_APPEND_FL    /* writes to file may only append */
+#define OCFS2_NODUMP_FL                        FS_NODUMP_FL    /* do not dump file */
+#define OCFS2_NOATIME_FL               FS_NOATIME_FL   /* do not update atime */
+/* Reserved for compression usage... */
+#define OCFS2_DIRTY_FL                 FS_DIRTY_FL
+#define OCFS2_COMPRBLK_FL              FS_COMPRBLK_FL  /* One or more compressed clusters */
+#define OCFS2_NOCOMP_FL                        FS_NOCOMP_FL    /* Don't compress */
+#define OCFS2_ECOMPR_FL                        FS_ECOMPR_FL    /* Compression error */
+/* End compression flags --- maybe not all used */
+#define OCFS2_BTREE_FL                 FS_BTREE_FL     /* btree format dir */
+#define OCFS2_INDEX_FL                 FS_INDEX_FL     /* hash-indexed directory */
+#define OCFS2_IMAGIC_FL                        FS_IMAGIC_FL    /* AFS directory */
+#define OCFS2_JOURNAL_DATA_FL          FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define OCFS2_NOTAIL_FL                        FS_NOTAIL_FL    /* file tail should not be merged */
+#define OCFS2_DIRSYNC_FL               FS_DIRSYNC_FL   /* dirsync behaviour (directories only) */
+#define OCFS2_TOPDIR_FL                        FS_TOPDIR_FL    /* Top of directory hierarchies*/
+#define OCFS2_RESERVED_FL              FS_RESERVED_FL  /* reserved for ext2 lib */
+
+#define OCFS2_FL_VISIBLE               FS_FL_USER_VISIBLE      /* User visible flags */
+#define OCFS2_FL_MODIFIABLE            FS_FL_USER_MODIFIABLE   /* User modifiable flags */
 
 /*
  * Extent record flags (e_node.leaf.flags)
index 2d3420af1a839e0e13626a34c289cd2227bf095a..5d241505690b71e2ec0b259cbc39f1ed46d683d5 100644 (file)
 /*
  * ioctl commands
  */
-#define OCFS2_IOC_GETFLAGS     _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS     _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS   _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS   _IOW('f', 2, int)
+#define OCFS2_IOC_GETFLAGS     FS_IOC_GETFLAGS
+#define OCFS2_IOC_SETFLAGS     FS_IOC_SETFLAGS
+#define OCFS2_IOC32_GETFLAGS   FS_IOC32_GETFLAGS
+#define OCFS2_IOC32_SETFLAGS   FS_IOC32_SETFLAGS
 
 /*
  * Space reservation / allocation / free ioctls and argument structure
index 73a11ccfd4c280681abe672c5e9cd81e3b229a93..efdd7560740655beba6f53dccef235b5ce89cdbf 100644 (file)
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_CACHE_SIZE - 1))
                        to = map_end & (PAGE_CACHE_SIZE - 1);
 
-               page = grab_cache_page(mapping, page_index);
+               page = find_or_create_page(mapping, page_index, GFP_NOFS);
 
                /*
                 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = grab_cache_page(context->inode->i_mapping, page_index);
+               page = find_or_create_page(context->inode->i_mapping,
+                                          page_index, GFP_NOFS);
                BUG_ON(!page);
 
                wait_on_page_writeback(page);
@@ -4200,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
                goto out;
        }
 
-       mutex_lock(&new_inode->i_mutex);
-       ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+       mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
+       ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
+                                     OI_LS_REFLINK_TARGET);
        if (ret) {
                mlog_errno(ret);
                goto out_unlock;
index d8b6e4259b80022cb824326f4ce2c665089b5818..3e78db361bc70b3ffc327a6f21b6d4580305d0eb 100644 (file)
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
                           struct ocfs2_alloc_reservation *resv,
                           int *cstart, int *clen)
 {
-       unsigned int wanted = *clen;
-
        if (resv == NULL || ocfs2_resmap_disabled(resmap))
                return -ENOSPC;
 
        spin_lock(&resv_lock);
 
-       /*
-        * We don't want to over-allocate for temporary
-        * windows. Otherwise, we run the risk of fragmenting the
-        * allocation space.
-        */
-       wanted = ocfs2_resv_window_bits(resmap, resv);
-       if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
-               wanted = *clen;
-
        if (ocfs2_resv_empty(resv)) {
-               mlog(0, "empty reservation, find new window\n");
+               /*
+                * We don't want to over-allocate for temporary
+                * windows. Otherwise, we run the risk of fragmenting the
+                * allocation space.
+                */
+               unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
 
+               if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+                       wanted = *clen;
+
+               mlog(0, "empty reservation, find new window\n");
                /*
                 * Try to get a window here. If it works, we must fall
+                * through and test the bitmap. This avoids some
index a8e6a95a353f03dcb8a34cf928ded84ff7e6d127..849c2f0e0a0e664e983adf61f620abedf87fcb72 100644 (file)
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
        u64             sr_bg_blkno;    /* The bg we allocated from.  Set
                                           to 0 when a block group is
                                           contiguous. */
+       u64             sr_bg_stable_blkno; /*
+                                            * Doesn't change, always
+                                            * set to target block
+                                            * group descriptor
+                                            * block.
+                                            */
        u64             sr_blkno;       /* The first allocated block */
        unsigned int    sr_bit_offset;  /* The bit in the bg */
        unsigned int    sr_bits;        /* How many bits we claimed */
 };
 
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+       if (res->sr_blkno == 0)
+               return 0;
+
+       if (res->sr_bg_blkno)
+               return res->sr_bg_blkno;
+
+       return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
        brelse(ac->ac_bh);
        ac->ac_bh = NULL;
        ac->ac_resv = NULL;
+       if (ac->ac_find_loc_priv) {
+               kfree(ac->ac_find_loc_priv);
+               ac->ac_find_loc_priv = NULL;
+       }
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@ out:
 static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
                                          struct ocfs2_group_desc *bg,
                                          struct ocfs2_chain_list *cl,
-                                         u64 p_blkno, u32 clusters)
+                                         u64 p_blkno, unsigned int clusters)
 {
        struct ocfs2_extent_list *el = &bg->bg_list;
        struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
        rec->e_blkno = cpu_to_le64(p_blkno);
        rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
                                  le16_to_cpu(cl->cl_bpc));
-       rec->e_leaf_clusters = cpu_to_le32(clusters);
+       rec->e_leaf_clusters = cpu_to_le16(clusters);
        le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
        le16_add_cpu(&bg->bg_free_bits_count,
                     clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (!ret)
                ocfs2_bg_discontig_fix_result(ac, gd, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
        ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
                                               res->sr_bits,
                                               le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (ret < 0)
                mlog_errno(ret);
 
+out_loc_only:
        *bits_left = le16_to_cpu(gd->bg_free_bits_count);
 
 out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 {
        int status;
        u16 chain;
-       u32 tmp_used;
        u64 next_group;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        if (!status)
                ocfs2_bg_discontig_fix_result(ac, bg, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
 
        /*
         * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                }
        }
 
-       /* Ok, claim our bits now: set the info on dinode, chainlist
-        * and then the group */
-       status = ocfs2_journal_access_di(handle,
-                                        INODE_CACHE(alloc_inode),
-                                        ac->ac_bh,
-                                        OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
+       status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+                                                 ac->ac_bh, res->sr_bits,
+                                                 chain);
+       if (status) {
                mlog_errno(status);
                goto bail;
        }
 
-       tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
-       fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
-       le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
-       ocfs2_journal_dirty(handle, ac->ac_bh);
-
        status = ocfs2_block_group_set_bits(handle,
                                            alloc_inode,
                                            bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
             (unsigned long long)le64_to_cpu(fe->i_blkno));
 
+out_loc_only:
        *bits_left = le16_to_cpu(bg->bg_free_bits_count);
 bail:
        brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
        int status;
        u16 victim, i;
        u16 bits_left = 0;
+       u64 hint = ac->ac_last_group;
        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;
 
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                goto bail;
        }
 
-       res->sr_bg_blkno = ac->ac_last_group;
+       res->sr_bg_blkno = hint;
        if (res->sr_bg_blkno) {
                /* Attempt to short-circuit the usual search mechanism
                 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
-       if (!status)
+       if (!status) {
+               hint = ocfs2_group_from_res(res);
                goto set_hint;
+       }
        if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
                goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                ac->ac_chain = i;
                status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            res, &bits_left);
-               if (!status)
+               if (!status) {
+                       hint = ocfs2_group_from_res(res);
                        break;
+               }
                if (status < 0 && status != -ENOSPC) {
                        mlog_errno(status);
                        goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
                if (bits_left < min_bits)
                        ac->ac_last_group = 0;
                else
-                       ac->ac_last_group = res->sr_bg_blkno;
+                       ac->ac_last_group = hint;
        }
 
 bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
        OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
 }
 
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno)
+{
+       int ret;
+       handle_t *handle = NULL;
+       struct ocfs2_suballoc_result *res;
+
+       BUG_ON(!ac);
+       BUG_ON(ac->ac_bits_given != 0);
+       BUG_ON(ac->ac_bits_wanted != 1);
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+       res = kzalloc(sizeof(*res), GFP_NOFS);
+       if (res == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+       /*
+        * The handle started here is for chain relink. Alternatively,
+        * we could just disable relink for these calls.
+        */
+       handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * This will instruct ocfs2_claim_suballoc_bits and
+        * ocfs2_search_one_group to search but save actual allocation
+        * for later.
+        */
+       ac->ac_find_loc_only = 1;
+
+       ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac->ac_find_loc_priv = res;
+       *fe_blkno = res->sr_blkno;
+
+out:
+       if (handle)
+               ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+       if (ret)
+               kfree(res);
+
+       return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno)
+{
+       int ret;
+       u16 chain;
+       struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+       struct buffer_head *bg_bh = NULL;
+       struct ocfs2_group_desc *bg;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+       /*
+        * Since di_blkno is being passed back in, we check for any
+        * inconsistencies which may have happened between
+        * calls. These are code bugs as di_blkno is not expected to
+        * change once returned from ocfs2_find_new_inode_loc()
+        */
+       BUG_ON(res->sr_blkno != di_blkno);
+
+       ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+                                         res->sr_bg_stable_blkno, &bg_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       chain = le16_to_cpu(bg->bg_chain);
+
+       ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+                                              ac->ac_bh, res->sr_bits,
+                                              chain);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle,
+                                        ac->ac_inode,
+                                        bg,
+                                        bg_bh,
+                                        res->sr_bit_offset,
+                                        res->sr_bits);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+            (unsigned long long)di_blkno);
+
+       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+       BUG_ON(res->sr_bits != 1);
+
+       *suballoc_loc = res->sr_bg_blkno;
+       *suballoc_bit = res->sr_bit_offset;
+       ac->ac_bits_given++;
+       ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+       brelse(bg_bh);
+
+       return ret;
+}
+
 int ocfs2_claim_new_inode(handle_t *handle,
                          struct inode *dir,
                          struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
  * suballoc_bit.
  */
 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
-                                      u16 *suballoc_slot, u16 *suballoc_bit)
+                                      u16 *suballoc_slot, u64 *group_blkno,
+                                      u16 *suballoc_bit)
 {
        int status;
        struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
                *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
        if (suballoc_bit)
                *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+       if (group_blkno)
+               *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
 
 bail:
        brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
  */
 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                                   struct inode *suballoc,
-                                  struct buffer_head *alloc_bh, u64 blkno,
+                                  struct buffer_head *alloc_bh,
+                                  u64 group_blkno, u64 blkno,
                                   u16 bit, int *res)
 {
        struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                goto bail;
        }
 
-       if (alloc_di->i_suballoc_loc)
-               bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
-       else
-               bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+       bg_blkno = group_blkno ? group_blkno :
+                  ocfs2_which_suballoc_group(blkno, bit);
        status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
                                             &group_bh);
        if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
 {
        int status;
+       u64 group_blkno = 0;
        u16 suballoc_bit = 0, suballoc_slot = 0;
        struct inode *inode_alloc_inode;
        struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        mlog_entry("blkno: %llu", (unsigned long long)blkno);
 
        status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
-                                            &suballoc_bit);
+                                            &group_blkno, &suballoc_bit);
        if (status < 0) {
                mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
                goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        }
 
        status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
-                                        blkno, suballoc_bit, res);
+                                        group_blkno, blkno, suballoc_bit, res);
        if (status < 0)
                mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
 
index a017dd3ee7d9ce2d6c0429d090b57ac077ed585a..b8afabfeede4c43694bdb8bf0a9664b0befd24a6 100644 (file)
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
        u64    ac_max_block;  /* Highest block number to allocate. 0 is
                                 is the same as ~0 - unlimited */
 
+       int    ac_find_loc_only;  /* hack for reflink operation ordering */
+       struct ocfs2_suballoc_result *ac_find_loc_priv; /* location found by ocfs2_find_new_inode_loc() */
+
        struct ocfs2_alloc_reservation  *ac_resv;
 };
 
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
                          struct ocfs2_alloc_context **meta_ac);
 
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno);
+
 #endif /* _CHAINALLOC_H_ */
index 32499d213fc4f80f37efde6f71196283236896bf..9975457c981f904ca18a512dd7bcbe0f092ac664 100644 (file)
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        /* Fast symlinks can't be large */
-       len = strlen(target);
+       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
        link = kzalloc(len + 1, GFP_NOFS);
        if (!link) {
                status = -ENOMEM;
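The strnlen() bound matters because a corrupted fast symlink need not be NUL-terminated within its inline area, and an unbounded strlen() would then read past the end of the buffer. A userspace illustration of the bounded scan:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* an inline "target" with no terminating NUL */
            char target[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };

            /* strlen(target) would run off the array; strnlen() stops
             * at the stated capacity, as the on-disk bound does above */
            printf("%zu\n", strnlen(target, sizeof(target))); /* 8 */
            return 0;
    }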
index d03469f618012ea4aff64b62f1f77e3e8b8ec47d..06fa5e77c40ef7aa0a3c0483ddbf3b9dd59f9dcf 100644 (file)
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
        xis.inode_bh = xbs.inode_bh = di_bh;
        di = (struct ocfs2_dinode *)di_bh->b_data;
 
-       down_read(&oi->ip_xattr_sem);
        ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
                                    buffer_size, &xis);
        if (ret == -ENODATA && di->i_xattr_loc)
                ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
                                            buffer_size, &xbs);
-       up_read(&oi->ip_xattr_sem);
 
        return ret;
 }
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
                mlog_errno(ret);
                return ret;
        }
+       down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
+       up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 0);
 
index a1c43e7c8a7be4ce70c729f3338eae09097f4025..8e4addaa542458badbe0e62da644ec1c64194f99 100644 (file)
@@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        INF("auxv",       S_IRUSR, proc_pid_auxv),
        ONE("status",     S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",     S_IRUSR, proc_pid_limits),
+       INF("limits",     S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
@@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = {
        INF("auxv",      S_IRUSR, proc_pid_auxv),
        ONE("status",    S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",    S_IRUSR, proc_pid_limits),
+       INF("limits",    S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
index 180cf5a0bd67119218c170b265cf944c103b889b..3b8b456603318f1ff017056cdeda40129d559ab4 100644 (file)
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
 
index 439fc1f1c1c41487ad76d23523d995a9f416b926..1dbca4e8cc164d08276d4a1f5a875e7726fc7020 100644 (file)
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
-               start += PAGE_SIZE;
+               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+                       start += PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
@@ -362,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        mss->referenced += PAGE_SIZE;
                mapcount = page_mapcount(page);
                if (mapcount >= 2) {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
index 91c817ff02c3847a874e1e99f7638b1148869a77..2367fb3f70bc6ba468ab0ccb628b5b88fabac955 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 
 static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
-       .llseek         = generic_file_llseek,
+       .llseek         = default_llseek,
 };
 
 static struct vmcore* __init get_new_element(void)
index f53505de071217399e39bf2013304ba46bd0c7f5..5cbb81e134aca031b21e3c88661400ff1c1b174f 100644 (file)
@@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
        int retval = 0;
+       int depth;
        int index;
        struct page *page;
        struct address_space *mapping;
@@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        /* we need to make sure nobody is changing the file size beneath
         ** us
         */
-       mutex_lock(&inode->i_mutex);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+       depth = reiserfs_write_lock_once(inode->i_sb);
 
        write_from = inode->i_size & (blocksize - 1);
        /* if we are on a block boundary, we are already unpacked.  */
@@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 
       out:
        mutex_unlock(&inode->i_mutex);
-       reiserfs_write_unlock(inode->i_sb);
+       reiserfs_write_unlock_once(inode->i_sb, depth);
        return retval;
 }
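reiserfs_write_lock_once() takes the per-superblock write lock only if the current task does not already hold it and returns the previous nesting depth, which the matching unlock uses to restore that state; reiserfs_mutex_lock_safe() similarly avoids acquiring i_mutex while the write lock is held. The pattern, as a sketch of how the two pair up:

            int depth;

            /* i_mutex must not be acquired under the reiserfs write lock */
            reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);

            /* lock only if not already held; remember the prior depth */
            depth = reiserfs_write_lock_once(inode->i_sb);

            /* ... critical section ... */

            mutex_unlock(&inode->i_mutex);
            reiserfs_write_unlock_once(inode->i_sb, depth);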
index d72cf2bb054a54bd8fcfbd7dc75f582ad2d29591..286e36e21dae587672216d7b7722488e91b45758 100644 (file)
@@ -1932,7 +1932,8 @@ xfs_buf_init(void)
        if (!xfs_buf_zone)
                goto out;
 
-       xfslogd_workqueue = create_workqueue("xfslogd");
+       xfslogd_workqueue = alloc_workqueue("xfslogd",
+                                       WQ_RESCUER | WQ_HIGHPRI, 1);
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;
 
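create_workqueue() predates the concurrency-managed workqueue code this merge brings in; alloc_workqueue() with WQ_RESCUER (the flag later renamed WQ_MEM_RECLAIM) guarantees a rescuer thread so log I/O completion can make progress during memory reclaim, WQ_HIGHPRI queues its work ahead of normal items, and a max_active of 1 bounds the number of in-flight work items per CPU. A sketch of the pattern, with an illustrative name:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_setup(void)
    {
            /* forward progress under reclaim, high priority, one in flight */
            example_wq = alloc_workqueue("example", WQ_RESCUER | WQ_HIGHPRI, 1);
            return example_wq ? 0 : -ENOMEM;
    }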
index 4fec427b83efc9dbf42ae9cda3aa64c5c71cdbec..3b9e626f7cd1562877cc340d0c43c5a776dd2e35 100644 (file)
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr(
 {
        struct fsxattr          fa;
 
+       memset(&fa, 0, sizeof(struct fsxattr));
+
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        fa.fsx_xflags = xfs_ip2xflags(ip);
        fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
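
The memset() added above closes an information leak: struct fsxattr contains fields and padding the function never assigns, and without zeroing them first the surrounding stack contents would be copied out to userspace later in the ioctl. The general pattern, sketched with a hypothetical structure:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct example_attr {           /* hypothetical ioctl reply */
            u32 flags;
            u32 pad[3];             /* never written explicitly */
    };

    static long example_ioctl_get(void __user *arg)
    {
            struct example_attr a;

            memset(&a, 0, sizeof(a));       /* zero padding first */
            a.flags = 0x1;

            if (copy_to_user(arg, &a, sizeof(a)))
                    return -EFAULT;
            return 0;
    }
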
index d59c4a65d492c9b6b0713accaec1ab1c2ba7ea5f..81976ffed7d6f031f1bef0d7a5995cbbebbed69b 100644 (file)
@@ -668,14 +668,11 @@ xfs_inode_set_reclaim_tag(
        xfs_perag_put(pag);
 }
 
-void
-__xfs_inode_clear_reclaim_tag(
-       xfs_mount_t     *mp,
+STATIC void
+__xfs_inode_clear_reclaim(
        xfs_perag_t     *pag,
        xfs_inode_t     *ip)
 {
-       radix_tree_tag_clear(&pag->pag_ici_root,
-                       XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        pag->pag_ici_reclaimable--;
        if (!pag->pag_ici_reclaimable) {
                /* clear the reclaim tag from the perag radix tree */
@@ -689,6 +686,17 @@ __xfs_inode_clear_reclaim_tag(
        }
 }
 
+void
+__xfs_inode_clear_reclaim_tag(
+       xfs_mount_t     *mp,
+       xfs_perag_t     *pag,
+       xfs_inode_t     *ip)
+{
+       radix_tree_tag_clear(&pag->pag_ici_root,
+                       XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+       __xfs_inode_clear_reclaim(pag, ip);
+}
+
 /*
  * Inodes in different states need to be treated differently, and the return
  * value of xfs_iflush is not sufficient to get this right. The following table
@@ -838,6 +846,7 @@ reclaim:
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
                ASSERT(0);
+       __xfs_inode_clear_reclaim(pag, ip);
        write_unlock(&pag->pag_ici_lock);
 
        /*
index ed575fb4b49597806200f676680ff787d5be9d12..7e206fc1fa362ed4bb761a8f5ad11ef39c2dd5c0 100644 (file)
@@ -405,9 +405,15 @@ xlog_cil_push(
        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
-       /* lock out transaction commit, but don't block on background push */
+       /*
+        * Lock out transaction commit, but don't block for background pushes
+        * unless we are well over the CIL space limit. See the definition of
+        * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
+        * used here.
+        */
        if (!down_write_trylock(&cil->xc_ctx_lock)) {
-               if (!push_seq)
+               if (!push_seq &&
+                   cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
                        goto out_free_ticket;
                down_write(&cil->xc_ctx_lock);
        }
@@ -422,7 +428,7 @@ xlog_cil_push(
                goto out_skip;
 
        /* check for a previously pushed sequence */
-       if (push_seq < cil->xc_ctx->sequence)
+       if (push_seq && push_seq < cil->xc_ctx->sequence)
                goto out_skip;
 
        /*
index ced52b98b322e3eb1be0e0c7dfc70f6096d0cd80..edcdfe01617f673bc1047caca3acc243f66a0658 100644 (file)
@@ -426,13 +426,13 @@ struct xfs_cil {
 };
 
 /*
- * The amount of log space we should the CIL to aggregate is difficult to size.
- * Whatever we chose we have to make we can get a reservation for the log space
- * effectively, that it is large enough to capture sufficient relogging to
- * reduce log buffer IO significantly, but it is not too large for the log or
- * induces too much latency when writing out through the iclogs. We track both
- * space consumed and the number of vectors in the checkpoint context, so we
- * need to decide which to use for limiting.
+ * The amount of log space we allow the CIL to aggregate is difficult to size.
+ * Whatever we choose, we have to make sure we can get a reservation for the
+ * log space effectively, that it is large enough to capture sufficient
+ * relogging to reduce log buffer IO significantly, but it is not too large for
+ * the log or induces too much latency when writing out through the iclogs. We
+ * track both space consumed and the number of vectors in the checkpoint
+ * context, so we need to decide which to use for limiting.
  *
  * Every log buffer we write out during a push needs a header reserved, which
  * is at least one sector and more for v2 logs. Hence we need a reservation of
@@ -459,16 +459,21 @@ struct xfs_cil {
  * checkpoint transaction ticket is specific to the checkpoint context, rather
  * than the CIL itself.
  *
- * With dynamic reservations, we can basically make up arbitrary limits for the
- * checkpoint size so long as they don't violate any other size rules.  Hence
- * the initial maximum size for the checkpoint transaction will be set to a
- * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit
- * right now based on the latency of writing out a large amount of data through
- * the circular iclog buffers.
+ * With dynamic reservations, we can effectively make up arbitrary limits for
+ * the checkpoint size so long as they don't violate any other size rules.
+ * Recovery imposes a rule that no transaction exceed half the log, so we are
+ * limited by that.  Furthermore, the log transaction reservation subsystem
+ * tries to keep 25% of the log free, so we need to keep below that limit or we
+ * risk running out of free log space to start any new transactions.
+ *
+ * In order to keep background CIL push efficient, we will set a lower
+ * threshold at which background pushing is attempted without blocking current
+ * transaction commits.  A separate, higher bound defines when CIL pushes are
+ * enforced to ensure we stay within our maximum checkpoint size bounds,
+ * yet still gives us plenty of space for aggregation on large logs.
  */
-
-#define XLOG_CIL_SPACE_LIMIT(log)      \
-       (min((log->l_logsize >> 2), (8 * 1024 * 1024)))
+#define XLOG_CIL_SPACE_LIMIT(log)      (log->l_logsize >> 3)
+#define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4))
 
 /*
  * The reservation head lsn is not made up of a cycle number and block number.
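
With the new macros, the background-push threshold is one eighth of the log and the hard limit three sixteenths, replacing the old min(logsize/4, 8MB) cap. Worked through for a 128MB log:

    XLOG_CIL_SPACE_LIMIT(log)      = 128MB >> 3       = 16MB   (background push starts)
    XLOG_CIL_HARD_SPACE_LIMIT(log) = 3 * (128MB >> 4) = 24MB   (commits start blocking on the push)
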
index c0786d446a00b88adf144ab97f05af06679cccf7..984cdc62e30bc52da4cef907f3dd5094aa5ffd71 100644 (file)
@@ -55,7 +55,7 @@
 extern u8 acpi_gbl_permanent_mmap;
 
 /*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
  * run time configuration
  */
 extern u32 acpi_dbg_level;
index c7376bf80b0604bf8f8b9394989178d6de45ecc2..8ca18e26d7e39fe429a8179d48f2f9f17f58a589 100644 (file)
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS          256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-       /* only some non-negative numbers are valid */
        return ((unsigned)number) < ARCH_NR_GPIOS;
 }
 
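Because gpio_is_valid() casts to unsigned, every negative number compares above ARCH_NR_GPIOS and is rejected, so platform data can use -1 as a no-such-GPIO sentinel and test it before requesting the line. A small sketch with hypothetical names:

    #include <linux/gpio.h>

    struct example_pdata {          /* hypothetical board data */
            int reset_gpio;         /* -1 when the board has no reset line */
    };

    static int example_setup(struct example_pdata *pd)
    {
            if (!gpio_is_valid(pd->reset_gpio))
                    return 0;       /* optional pin absent */

            return gpio_request(pd->reset_gpio, "example-reset");
    }
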
index 62f59080e5cc215edb843f928763d9898055071f..04d0a977cd431fc5eb2c2f34cbd5390bfeb0db05 100644 (file)
@@ -3,13 +3,13 @@
 
 #include <linux/cache.h>
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
        unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
 
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
index e2bd73e8f9c0b4db75755d1e4f0aadaaa0e20c2a..f4d4120e5128a8fe76580286151017ddb68c497b 100644 (file)
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)        (pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot) (prot)
 #endif
index 8a92a170fb7dfd87710bb26b3c7f384dd6801363..f4229fb315e1d7d81caddb7557cbcb6c65072fed 100644 (file)
                                                                        \
        BUG_TABLE                                                       \
                                                                        \
+       JUMP_TABLE                                                      \
+                                                                       \
        /* PCI quirks */                                                \
        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE                                                     \
+       . = ALIGN(8);                                                   \
+       __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {           \
+               VMLINUX_SYMBOL(__start___jump_table) = .;               \
+               *(__jump_table)                                         \
+               VMLINUX_SYMBOL(__stop___jump_table) = .;                \
+       }
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA                                                      \
        . = ALIGN(4);                                                   \
                                - LOAD_OFFSET) {                        \
                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
                *(.data..percpu..first)                                 \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               *(.data..percpu..readmostly)                            \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
                VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
                VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
                *(.data..percpu..first)                                 \
+               . = ALIGN(PAGE_SIZE);                                   \
                *(.data..percpu..page_aligned)                          \
+               *(.data..percpu..readmostly)                            \
                *(.data..percpu)                                        \
                *(.data..percpu..shared_aligned)                        \
                VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
index 7809d230adee3f90c9537f6c00ec66bb53c1bd27..4c9461a4f9e67b4b3e67c5bb73192aed44695079 100644 (file)
@@ -612,7 +612,7 @@ struct drm_gem_object {
        struct kref refcount;
 
        /** Handle count of this object. Each handle also holds a reference */
-       struct kref handlecount;
+       atomic_t handle_count; /* number of handles on this object */
 
        /** Related drm device */
        struct drm_device *dev;
@@ -808,7 +808,6 @@ struct drm_driver {
         */
        int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
-       void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
        /* vga arb irq handler */
        void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (obj != NULL)
-               kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+       if (obj != NULL) {
+               struct drm_device *dev = obj->dev;
+               mutex_lock(&dev->struct_mutex);
+               kref_put(&obj->refcount, drm_gem_object_free);
+               mutex_unlock(&dev->struct_mutex);
+       }
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1495,7 +1498,7 @@ static inline void
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
        drm_gem_object_reference(obj);
-       kref_get(&obj->handlecount);
+       atomic_inc(&obj->handle_count);
 }
 
 static inline void
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference(obj);
 }
 
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
+
        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
        */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference_unlocked(obj);
 }
 
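The handle count moves from a kref to a bare atomic_t because the final-free action needs to run under locking that a kref release callback cannot easily take; open-coding atomic_dec_and_test() leaves that choice to the caller, and the atomic_read() guard above turns an unbalanced put into a no-op instead of an underflow. The underlying idiom, sketched generically:

    #include <linux/atomic.h>

    struct example_obj {            /* hypothetical refcounted object */
            atomic_t handle_count;
    };

    static void example_handle_get(struct example_obj *obj)
    {
            atomic_inc(&obj->handle_count);
    }

    static void example_handle_put(struct example_obj *obj)
    {
            if (atomic_read(&obj->handle_count) == 0)
                    return;         /* guard against unbalanced puts */
            if (atomic_dec_and_test(&obj->handle_count)) {
                    /* last handle gone: tear down names etc., under
                     * whatever lock the subsystem requires */
            }
    }
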
index c9f3cc5949a82eb0ae5ba4b463891df5f96806dd..3e5a51af757c76ba07f514d00265a5e7300b6627 100644 (file)
@@ -386,7 +386,15 @@ struct drm_connector_funcs {
        void (*dpms)(struct drm_connector *connector, int mode);
        void (*save)(struct drm_connector *connector);
        void (*restore)(struct drm_connector *connector);
-       enum drm_connector_status (*detect)(struct drm_connector *connector);
+
+       /* Check to see if anything is attached to the connector.
+        * @force is set to false whilst polling, true when checking the
+        * connector due to user request. @force can be used by the driver
+        * to avoid expensive, destructive operations during automated
+        * probing.
+        */
+       enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                           bool force);
        int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
        int (*set_property)(struct drm_connector *connector, struct drm_property *property,
                             uint64_t val);
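
A driver implementing the new two-argument detect() hook would use @force to gate anything destructive; a hedged sketch (the example_* helpers are illustrative, not a real driver's API):

    static enum drm_connector_status
    example_detect(struct drm_connector *connector, bool force)
    {
            /* Cheap, non-destructive probe (e.g. DDC/EDID) first. */
            if (example_ddc_probe(connector))
                    return connector_status_connected;

            /* Load detection disturbs the display pipe, so only do it
             * on explicit user request, never from background polling. */
            if (!force)
                    return connector_status_unknown;

            return example_load_detect(connector);
    }
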
index 3a9940ef728bb5d2412c4cb54d84ed746e15d870..883c1d4398996d8ba807ca5d54940cec28c7809e 100644 (file)
@@ -85,7 +85,6 @@
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
-       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
        {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
index 267a86c74e2e5cc089dfb5c72a1f1da3dbdae721..2040e6c4f1729a7de3001d5077277b8521427fb9 100644 (file)
@@ -246,9 +246,11 @@ struct ttm_buffer_object {
 
        atomic_t reserved;
 
-
        /**
         * Members protected by the bo::lock
+        * In addition, setting sync_obj to anything else
+        * than NULL requires bo::reserved to be held. This allows for
+        * checking NULL while reserved but not holding bo::lock.
         */
 
        void *sync_obj_arg;
index 626b629429ff2fc3f30fcf63b25fb52211f184c0..4e8ea8c8ec1e7f6fc04ebf992294b63cb23b1bbb 100644 (file)
@@ -118,7 +118,6 @@ header-y += eventpoll.h
 header-y += ext2_fs.h
 header-y += fadvise.h
 header-y += falloc.h
-header-y += fanotify.h
 header-y += fb.h
 header-y += fcntl.h
 header-y += fd.h
index 7e3d2859be50230b9b2755288a53d8fb693145b0..1d0ef1ae80362d50f1b620b54c01313d6c46b936 100644 (file)
@@ -25,8 +25,6 @@ static inline u32 acpi_pm_read_early(void)
        return acpi_pm_read_verified() & ACPI_PM_MASK;
 }
 
-extern void pmtimer_wait(unsigned);
-
 #else
 
 static inline u32 acpi_pm_read_early(void)
similarity index 97%
rename from fs/ceph/auth.h
rename to include/linux/ceph/auth.h
index d38a2fb4a13762fc6e37578f840b166ee85efd16..7fff521d7eb5e00741fcf2fc97f7ce8c40fdbfc8 100644 (file)
@@ -1,8 +1,8 @@
 #ifndef _FS_CEPH_AUTH_H
 #define _FS_CEPH_AUTH_H
 
-#include "types.h"
-#include "buffer.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
 
 /*
  * Abstract interface for communicating with the authentication module.
similarity index 100%
rename from fs/ceph/buffer.h
rename to include/linux/ceph/buffer.h
similarity index 86%
rename from fs/ceph/ceph_debug.h
rename to include/linux/ceph/ceph_debug.h
index 1818c2305610e28f6dbb7d3e126e7137d28c1365..aa2e19182d990eedda70de69bf638db5856811d1 100644 (file)
@@ -3,7 +3,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#ifdef CONFIG_CEPH_FS_PRETTYDEBUG
+#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
 
 /*
  * wrap pr_debug to include a filename:lineno prefix on each line.
@@ -14,7 +14,8 @@
 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 extern const char *ceph_file_part(const char *s, int len);
 #  define dout(fmt, ...)                                               \
-       pr_debug(" %12.12s:%-4d : " fmt,                                \
+       pr_debug("%.*s %12.12s:%-4d : " fmt,                            \
+                8 - (int)sizeof(KBUILD_MODNAME), "    ",               \
                 ceph_file_part(__FILE__, sizeof(__FILE__)),            \
                 __LINE__, ##__VA_ARGS__)
 # else
similarity index 99%
rename from fs/ceph/ceph_fs.h
rename to include/linux/ceph/ceph_fs.h
index d5619ac86711a6b41c56a00da7660099352d5327..c3c74aef289d0d34afb2758392ef144d82b59cc6 100644 (file)
@@ -299,6 +299,7 @@ enum {
        CEPH_MDS_OP_SETATTR    = 0x01108,
        CEPH_MDS_OP_SETFILELOCK= 0x01109,
        CEPH_MDS_OP_GETFILELOCK= 0x00110,
+       CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,
 
        CEPH_MDS_OP_MKNOD      = 0x01201,
        CEPH_MDS_OP_LINK       = 0x01202,
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
new file mode 100644 (file)
index 0000000..2a79702
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _FS_CEPH_DEBUGFS_H
+#define _FS_CEPH_DEBUGFS_H
+
+#include "ceph_debug.h"
+#include "types.h"
+
+#define CEPH_DEFINE_SHOW_FUNC(name)                                    \
+static int name##_open(struct inode *inode, struct file *file)         \
+{                                                                      \
+       struct seq_file *sf;                                            \
+       int ret;                                                        \
+                                                                       \
+       ret = single_open(file, name, NULL);                            \
+       sf = file->private_data;                                        \
+       sf->private = inode->i_private;                                 \
+       return ret;                                                     \
+}                                                                      \
+                                                                       \
+static const struct file_operations name##_fops = {                    \
+       .open           = name##_open,                                  \
+       .read           = seq_read,                                     \
+       .llseek         = seq_lseek,                                    \
+       .release        = single_release,                               \
+};
+
+/* debugfs.c */
+extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_cleanup(void);
+extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+
+#endif
+
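
CEPH_DEFINE_SHOW_FUNC wraps the usual single_open() boilerplate: given a seq_file show routine called name, it emits name_open() plus a name_fops that can be handed straight to debugfs, with the file's i_private showing up as seq_file->private. A hedged usage sketch:

    static int monmap_show(struct seq_file *s, void *p)
    {
            struct ceph_client *client = s->private;

            seq_printf(s, "have fsid: %d\n", client->have_fsid);
            return 0;
    }

    CEPH_DEFINE_SHOW_FUNC(monmap_show)  /* emits monmap_show_open + monmap_show_fops */

    /* later, when building the client's debugfs tree:
     *   debugfs_create_file("monmap", 0600, client->debugfs_dir,
     *                       client, &monmap_show_fops);
     */
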
similarity index 96%
rename from fs/ceph/decode.h
rename to include/linux/ceph/decode.h
index 3d25415afe6368debff12c405463e52c7683edc6..c5b6939fb32af578501a280f73f2aa7b4276ac37 100644 (file)
@@ -191,6 +191,11 @@ static inline void ceph_encode_string(void **p, void *end,
                ceph_encode_need(p, end, n, bad);               \
                ceph_encode_copy(p, pv, n);                     \
        } while (0)
+#define ceph_encode_string_safe(p, end, s, n, bad)             \
+       do {                                                    \
+               ceph_encode_need(p, end, n, bad);               \
+               ceph_encode_string(p, end, s, n);               \
+       } while (0)
 
 
 #endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
new file mode 100644 (file)
index 0000000..f22b2e9
--- /dev/null
@@ -0,0 +1,249 @@
+#ifndef _FS_CEPH_LIBCEPH_H
+#define _FS_CEPH_LIBCEPH_H
+
+#include "ceph_debug.h"
+
+#include <asm/unaligned.h>
+#include <linux/backing-dev.h>
+#include <linux/completion.h>
+#include <linux/exportfs.h>
+#include <linux/fs.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/writeback.h>
+#include <linux/slab.h>
+
+#include "types.h"
+#include "messenger.h"
+#include "msgpool.h"
+#include "mon_client.h"
+#include "osd_client.h"
+#include "ceph_fs.h"
+
+/*
+ * Supported features
+ */
+#define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_DEFAULT  CEPH_FEATURE_NOSRCADDR
+
+/*
+ * mount options
+ */
+#define CEPH_OPT_FSID             (1<<0)
+#define CEPH_OPT_NOSHARE          (1<<1) /* don't share client with other sbs */
+#define CEPH_OPT_MYIP             (1<<2) /* specified my ip */
+#define CEPH_OPT_NOCRC            (1<<3) /* no data crc on writes */
+
+#define CEPH_OPT_DEFAULT   (0)
+
+#define ceph_set_opt(client, opt) \
+       (client)->options->flags |= CEPH_OPT_##opt;
+#define ceph_test_opt(client, opt) \
+       (!!((client)->options->flags & CEPH_OPT_##opt))
+
+struct ceph_options {
+       int flags;
+       struct ceph_fsid fsid;
+       struct ceph_entity_addr my_addr;
+       int mount_timeout;
+       int osd_idle_ttl;
+       int osd_timeout;
+       int osd_keepalive_timeout;
+
+       /*
+        * any type that can't be simply compared or doesn't need
+        * to be compared should go beyond this point,
+        * ceph_compare_options() should be updated accordingly
+        */
+
+       struct ceph_entity_addr *mon_addr; /* should be the first
+                                             pointer type of args */
+       int num_mon;
+       char *name;
+       char *secret;
+};
+
+/*
+ * defaults
+ */
+#define CEPH_MOUNT_TIMEOUT_DEFAULT  60
+#define CEPH_OSD_TIMEOUT_DEFAULT    60  /* seconds */
+#define CEPH_OSD_KEEPALIVE_DEFAULT  5
+#define CEPH_OSD_IDLE_TTL_DEFAULT    60
+#define CEPH_MOUNT_RSIZE_DEFAULT    (512*1024) /* readahead */
+
+#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN  (16*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT   "guest"
+
+/*
+ * Delay telling the MDS we no longer want caps, in case we reopen
+ * the file.  Delay a minimum amount of time, even if we send a cap
+ * message for some other reason.  Otherwise, take the opportunity to
+ * update the mds to avoid sending another message later.
+ */
+#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT      5  /* cap release delay */
+#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT     60  /* cap release delay */
+
+#define CEPH_CAP_RELEASE_SAFETY_DEFAULT        (CEPH_CAPS_PER_RELEASE * 4)
+
+/* mount state */
+enum {
+       CEPH_MOUNT_MOUNTING,
+       CEPH_MOUNT_MOUNTED,
+       CEPH_MOUNT_UNMOUNTING,
+       CEPH_MOUNT_UNMOUNTED,
+       CEPH_MOUNT_SHUTDOWN,
+};
+
+/*
+ * subtract jiffies
+ */
+static inline unsigned long time_sub(unsigned long a, unsigned long b)
+{
+       BUG_ON(time_after(b, a));
+       return (long)a - (long)b;
+}
+
+struct ceph_mds_client;
+
+/*
+ * per client state
+ *
+ * possibly shared by multiple mount points, if they are
+ * mounting the same ceph filesystem/cluster.
+ */
+struct ceph_client {
+       struct ceph_fsid fsid;
+       bool have_fsid;
+
+       void *private;
+
+       struct ceph_options *options;
+
+       struct mutex mount_mutex;      /* serialize mount attempts */
+       wait_queue_head_t auth_wq;
+       int auth_err;
+
+       int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *);
+
+       u32 supported_features;
+       u32 required_features;
+
+       struct ceph_messenger *msgr;   /* messenger instance */
+       struct ceph_mon_client monc;
+       struct ceph_osd_client osdc;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debugfs_dir;
+       struct dentry *debugfs_monmap;
+       struct dentry *debugfs_osdmap;
+#endif
+};
+
+
+
+/*
+ * snapshots
+ */
+
+/*
+ * A "snap context" is the set of existing snapshots when we
+ * write data.  It is used by the OSD to guide its COW behavior.
+ *
+ * The ceph_snap_context is refcounted, and attached to each dirty
+ * page, indicating which context the dirty data belonged to when it was
+ * dirtied.
+ */
+struct ceph_snap_context {
+       atomic_t nref;
+       u64 seq;
+       int num_snaps;
+       u64 snaps[];
+};
+
+static inline struct ceph_snap_context *
+ceph_get_snap_context(struct ceph_snap_context *sc)
+{
+       /*
+       printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+              atomic_read(&sc->nref)+1);
+       */
+       if (sc)
+               atomic_inc(&sc->nref);
+       return sc;
+}
+
+static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
+{
+       if (!sc)
+               return;
+       /*
+       printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+              atomic_read(&sc->nref)-1);
+       */
+       if (atomic_dec_and_test(&sc->nref)) {
+               /*printk(" deleting snap_context %p\n", sc);*/
+               kfree(sc);
+       }
+}
+
+/*
+ * calculate the number of pages a given length and offset map onto,
+ * if we align the data.
+ */
+static inline int calc_pages_for(u64 off, u64 len)
+{
+       return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
+               (off >> PAGE_CACHE_SHIFT);
+}
+
+/* ceph_common.c */
+extern const char *ceph_msg_type_name(int type);
+extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
+extern struct kmem_cache *ceph_inode_cachep;
+extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_dentry_cachep;
+extern struct kmem_cache *ceph_file_cachep;
+
+extern int ceph_parse_options(struct ceph_options **popt, char *options,
+                             const char *dev_name, const char *dev_name_end,
+                             int (*parse_extra_token)(char *c, void *private),
+                             void *private);
+extern void ceph_destroy_options(struct ceph_options *opt);
+extern int ceph_compare_options(struct ceph_options *new_opt,
+                               struct ceph_client *client);
+extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
+                                             void *private);
+extern u64 ceph_client_id(struct ceph_client *client);
+extern void ceph_destroy_client(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client,
+                              unsigned long started);
+extern int ceph_open_session(struct ceph_client *client);
+
+/* pagevec.c */
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+
+extern struct page **ceph_get_direct_page_vector(const char __user *data,
+                                           int num_pages,
+                                           loff_t off, size_t len);
+extern void ceph_put_page_vector(struct page **pages, int num_pages);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+extern int ceph_copy_user_to_page_vector(struct page **pages,
+                                        const char __user *data,
+                                        loff_t off, size_t len);
+extern int ceph_copy_to_page_vector(struct page **pages,
+                                   const char *data,
+                                   loff_t off, size_t len);
+extern int ceph_copy_from_page_vector(struct page **pages,
+                                   char *data,
+                                   loff_t off, size_t len);
+extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
+                                   loff_t off, size_t len);
+extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
+
+
+#endif /* _FS_CEPH_LIBCEPH_H */
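
calc_pages_for() above computes how many page-cache pages an (offset, length) pair touches. Worked through for 4KB pages with off = 1000, len = 5000 (bytes 1000..5999, i.e. pages 0 and 1):

    calc_pages_for(1000, 5000)
        = ((1000 + 5000 + 4095) >> 12) - (1000 >> 12)
        = (10095 >> 12) - 0
        = 2 pages
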
similarity index 100%
rename from fs/ceph/mdsmap.h
rename to include/linux/ceph/mdsmap.h
similarity index 95%
rename from fs/ceph/messenger.h
rename to include/linux/ceph/messenger.h
index 76fbc957bc137dab063cdd69abcc1b432800aa51..5956d62c3057139ac6634df8e13556659d07725e 100644 (file)
@@ -65,6 +65,9 @@ struct ceph_messenger {
         */
        u32 global_seq;
        spinlock_t global_seq_lock;
+
+       u32 supported_features;
+       u32 required_features;
 };
 
 /*
@@ -82,6 +85,10 @@ struct ceph_msg {
        struct ceph_pagelist *pagelist; /* instead of pages */
        struct list_head list_head;
        struct kref kref;
+       struct bio  *bio;               /* instead of pages/pagelist */
+       struct bio  *bio_iter;          /* bio iterator */
+       int bio_seg;                    /* current bio segment */
+       struct ceph_pagelist *trail;    /* the trailing part of the data */
        bool front_is_vmalloc;
        bool more_to_follow;
        bool needs_out_seq;
@@ -205,7 +212,7 @@ struct ceph_connection {
 };
 
 
-extern const char *pr_addr(const struct sockaddr_storage *ss);
+extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
 extern int ceph_parse_ips(const char *c, const char *end,
                          struct ceph_entity_addr *addr,
                          int max_count, int *count);
@@ -216,7 +223,8 @@ extern void ceph_msgr_exit(void);
 extern void ceph_msgr_flush(void);
 
 extern struct ceph_messenger *ceph_messenger_create(
-       struct ceph_entity_addr *myaddr);
+       struct ceph_entity_addr *myaddr,
+       u32 features, u32 required);
 extern void ceph_messenger_destroy(struct ceph_messenger *);
 
 extern void ceph_con_init(struct ceph_messenger *msgr,
similarity index 99%
rename from fs/ceph/mon_client.h
rename to include/linux/ceph/mon_client.h
index 8e396f2c09637f29d0a82311244d036441a51c1c..545f85917780ab3cd65a314553f7011b655dbb50 100644 (file)
@@ -79,6 +79,7 @@ struct ceph_mon_client {
        u64 last_tid;
 
        /* mds/osd map */
+       int want_mdsmap;
        int want_next_osdmap; /* 1 = want, 2 = want+asked */
        u32 have_osdmap, have_mdsmap;
 
similarity index 100%
rename from fs/ceph/msgr.h
rename to include/linux/ceph/msgr.h
similarity index 76%
rename from fs/ceph/osd_client.h
rename to include/linux/ceph/osd_client.h
index ce776989ef6a76d73d1475ce2335ce558b653e2d..6c91fb032c39cd907a3ac509db75a3a860e564c1 100644 (file)
@@ -15,6 +15,7 @@ struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
 struct ceph_authorizer;
+struct ceph_pagelist;
 
 /*
  * completion callback for async writepages
@@ -68,6 +69,7 @@ struct ceph_osd_request {
        struct list_head  r_unsafe_item;
 
        struct inode *r_inode;                /* for use by callbacks */
+       void *r_priv;                         /* ditto */
 
        char              r_oid[40];          /* object name */
        int               r_oid_len;
@@ -80,6 +82,11 @@ struct ceph_osd_request {
        struct page     **r_pages;            /* pages for data payload */
        int               r_pages_from_pool;
        int               r_own_pages;        /* if true, i own page list */
+#ifdef CONFIG_BLOCK
+       struct bio       *r_bio;              /* instead of pages */
+#endif
+
+       struct ceph_pagelist *r_trail;        /* trailing part of the data */
 };
 
 struct ceph_osd_client {
@@ -110,6 +117,42 @@ struct ceph_osd_client {
        struct ceph_msgpool     msgpool_op_reply;
 };
 
+struct ceph_osd_req_op {
+       u16 op;           /* CEPH_OSD_OP_* */
+       u32 flags;        /* CEPH_OSD_FLAG_* */
+       union {
+               struct {
+                       u64 offset, length;
+                       u64 truncate_size;
+                       u32 truncate_seq;
+               } extent;
+               struct {
+                       const char *name;
+                       u32 name_len;
+                       const char  *val;
+                       u32 value_len;
+                       __u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
+                       __u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
+               } xattr;
+               struct {
+                       const char *class_name;
+                       __u8 class_len;
+                       const char *method_name;
+                       __u8 method_len;
+                       __u8 argc;
+                       const char *indata;
+                       u32 indata_len;
+               } cls;
+               struct {
+                       u64 cookie, count;
+               } pgls;
+               struct {
+                       u64 snapid;
+               } snap;
+       };
+       u32 payload_len;
+};
+
 extern int ceph_osdc_init(struct ceph_osd_client *osdc,
                          struct ceph_client *client);
 extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
@@ -119,6 +162,30 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
                                 struct ceph_msg *msg);
 
+extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+                       struct ceph_file_layout *layout,
+                       u64 snapid,
+                       u64 off, u64 *plen, u64 *bno,
+                       struct ceph_osd_request *req,
+                       struct ceph_osd_req_op *op);
+
+extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+                                              int flags,
+                                              struct ceph_snap_context *snapc,
+                                              struct ceph_osd_req_op *ops,
+                                              bool use_mempool,
+                                              gfp_t gfp_flags,
+                                              struct page **pages,
+                                              struct bio *bio);
+
+extern void ceph_osdc_build_request(struct ceph_osd_request *req,
+                                   u64 off, u64 *plen,
+                                   struct ceph_osd_req_op *src_ops,
+                                   struct ceph_snap_context *snapc,
+                                   struct timespec *mtime,
+                                   const char *oid,
+                                   int oid_len);
+
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
                                      struct ceph_file_layout *layout,
                                      struct ceph_vino vino,
similarity index 97%
rename from fs/ceph/osdmap.h
rename to include/linux/ceph/osdmap.h
index 970b547e510dbd0edb8ba8c2ec95c45932be6106..ba4c205cbb016a141495e2e872ee6a3cfd57253a 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/rbtree.h>
 #include "types.h"
 #include "ceph_fs.h"
-#include "crush/crush.h"
+#include <linux/crush/crush.h>
 
 /*
  * The osd map describes the current membership of the osd cluster and
@@ -125,4 +125,6 @@ extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
 extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
                                struct ceph_pg pgid);
 
+extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+
 #endif
similarity index 62%
rename from fs/ceph/pagelist.h
rename to include/linux/ceph/pagelist.h
index e8a4187e1087aa7fbc86a2620764f87f234dfe0a..9660d6b0a35d780dbe0806edf6193d1f7d5ef8f5 100644 (file)
@@ -8,6 +8,14 @@ struct ceph_pagelist {
        void *mapped_tail;
        size_t length;
        size_t room;
+       struct list_head free_list;
+       size_t num_pages_free;
+};
+
+struct ceph_pagelist_cursor {
+       struct ceph_pagelist *pl;   /* pagelist, for error checking */
+       struct list_head *page_lru; /* page in list */
+       size_t room;                /* room remaining to reset to */
 };
 
 static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
@@ -16,10 +24,23 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
        pl->mapped_tail = NULL;
        pl->length = 0;
        pl->room = 0;
+       INIT_LIST_HEAD(&pl->free_list);
+       pl->num_pages_free = 0;
 }
+
 extern int ceph_pagelist_release(struct ceph_pagelist *pl);
 
-extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l);
+extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
+
+extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
+
+extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
+
+extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+                                    struct ceph_pagelist_cursor *c);
+
+extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+                                 struct ceph_pagelist_cursor *c);
 
 static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
 {
similarity index 100%
rename from fs/ceph/rados.h
rename to include/linux/ceph/rados.h
similarity index 100%
rename from fs/ceph/types.h
rename to include/linux/ceph/types.h
index ed3e92e41c6e5683ad3dbb823e48259f5150ac33..709dfb901d1124c75656fb0f7591826683bd2c5d 100644 (file)
@@ -75,7 +75,7 @@ struct cgroup_subsys_state {
 
        unsigned long flags;
        /* ID for this css, if possible */
-       struct css_id *id;
+       struct css_id __rcu *id;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
@@ -205,7 +205,7 @@ struct cgroup {
        struct list_head children;      /* my children */
 
        struct cgroup *parent;          /* my parent */
-       struct dentry *dentry;          /* cgroup fs entry, RCU protected */
+       struct dentry __rcu *dentry;    /* cgroup fs entry, RCU protected */
 
        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                        struct task_struct *t)
+{
+       return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
        return 0;
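
cgroup_attach_task_current_cg() is now a thin wrapper over the more general cgroup_attach_task_all(), which moves a task into every cgroup some other task belongs to; the !CONFIG_CGROUPS stubs return 0 so callers need no ifdefs. A hedged usage sketch (a driver attaching a worker thread to its requester's cgroups so resource usage is charged to the requester):

    #include <linux/cgroup.h>
    #include <linux/sched.h>

    static int example_attach_worker(struct task_struct *worker)
    {
            /* account the worker to the same cgroups as the caller */
            return cgroup_attach_task_all(current, worker);
    }
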
index 9ddc8780e8db7bfe45496587bc47e5fb00d12073..5778b559d59c3222ee825897b7430523aa96c353 100644 (file)
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector, unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
                struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index c1a62c56a660226b1592bc6bc269087adac58649..320d6c94ff848d5db94fb1fd76576501a88e9a3a 100644 (file)
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu      __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu         __attribute__((noderef, address_space(4)))
+#else
 # define __rcu
+#endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
index 8ba66a9d9022c7d0ddbbcb984b8839dabb2b91f8..ba4b85a6d9b8bc71853de37df2a36b865707b745 100644 (file)
@@ -9,37 +9,7 @@
  * These are the only things you should do on a core-file: use only these
  * functions to write out all the necessary info.
  */
-static inline int dump_write(struct file *file, const void *addr, int nr)
-{
-       return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
-
-static inline int dump_seek(struct file *file, loff_t off)
-{
-       int ret = 1;
-
-       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
-                       return 0;
-       } else {
-               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-
-               if (!buf)
-                       return 0;
-               while (off > 0) {
-                       unsigned long n = off;
-
-                       if (n > PAGE_SIZE)
-                               n = PAGE_SIZE;
-                       if (!dump_write(file, buf, n)) {
-                               ret = 0;
-                               break;
-                       }
-                       off -= n;
-               }
-               free_page((unsigned long)buf);
-       }
-       return ret;
-}
+extern int dump_write(struct file *file, const void *addr, int nr);
+extern int dump_seek(struct file *file, loff_t off);
 
 #endif /* _LINUX_COREDUMP_H */
index 36ca9721a0c28a0150b05153b4f442d2c558d6d4..1be416bbbb82540802a0a742ba2c22934a9a6659 100644 (file)
@@ -53,6 +53,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_BALANCED  (0x40) /* medium latency, moderate savings */
 #define CPUIDLE_FLAG_DEEP      (0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE    (0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
index 4d2c39573f3694cdeef07d83c6f141b4804a5f36..4aaeab3764469961f1106d988e57e58a91e1a16e 100644 (file)
@@ -84,7 +84,7 @@ struct thread_group_cred {
        atomic_t        usage;
        pid_t           tgid;                   /* thread group process ID */
        spinlock_t      lock;
-       struct key      *session_keyring;       /* keyring inherited over fork */
+       struct key __rcu *session_keyring;      /* keyring inherited over fork */
        struct key      *process_keyring;       /* keyring private to this process */
        struct rcu_head rcu;                    /* RCU deletion hook */
 };
index 29b3ce3f2a1d0cd948523d8890c0bb8420834e09..2833452ea01c8ecec5207a7a8631da3f2b439fa3 100644 (file)
@@ -49,7 +49,6 @@ struct task_struct;
 
 #ifdef CONFIG_LOCKDEP
 extern void debug_show_all_locks(void);
-extern void __debug_show_held_locks(struct task_struct *task);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
 extern void debug_check_no_locks_held(struct task_struct *task);
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void)
 {
 }
 
-static inline void __debug_show_held_locks(struct task_struct *task)
-{
-}
-
 static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
index ce29b8151198b6c0d40b8f420efe9c2c920d9b4f..ba8319ae5fcc3e75edc0d52240da0b16e18da68d 100644 (file)
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
        return DMA_BIT_MASK(32);
 }
 
+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
        dev->coherent_dma_mask = mask;
        return 0;
 }
+#endif
 
 extern u64 dma_get_required_mask(struct device *dev);
 
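The new ARCH_HAS_DMA_SET_COHERENT_MASK hook lets an architecture replace the generic dma_set_coherent_mask() (which only validates and stores the mask) with its own logic, for instance to honor a bus-level addressing limit. A hedged sketch of what an arch override might look like (the 32-bit clamp is illustrative):

    /* in the arch's <asm/dma-mapping.h>: */
    #define ARCH_HAS_DMA_SET_COHERENT_MASK

    /* in arch code: */
    #include <linux/dma-mapping.h>

    int dma_set_coherent_mask(struct device *dev, u64 mask)
    {
            if (mask > DMA_BIT_MASK(32))    /* hypothetical bus limit */
                    mask = DMA_BIT_MASK(32);

            if (!dma_supported(dev, mask))
                    return -EIO;

            dev->coherent_dma_mask = mask;
            return 0;
    }
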
index c61d4ca27bcc26906699101b4f8eee7ce8e8525b..e2106495cc11383ad9a14d7ac119400b18a8f83a 100644 (file)
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
        return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }
 
-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
        return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }
index 52c0da4bdd18fe5969fbf9c583652c0f7f00ba07..bef3cda44c4c4bb54570600a99fb0003db7038e7 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _DYNAMIC_DEBUG_H
 #define _DYNAMIC_DEBUG_H
 
+#include <linux/jump_label.h>
+
 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
  * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
  * use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@ struct _ddebug {
        const char *function;
        const char *filename;
        const char *format;
-       char primary_hash;
-       char secondary_hash;
        unsigned int lineno:24;
        /*
         * The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@ struct _ddebug {
 #define _DPRINTK_FLAGS_PRINT   (1<<0)  /* printk() a message using the format */
 #define _DPRINTK_FLAGS_DEFAULT 0
        unsigned int flags:8;
+       char enabled;
 } __attribute__((aligned(8)));
 
 
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 #if defined(CONFIG_DYNAMIC_DEBUG)
 extern int ddebug_remove_module(const char *mod_name);
 
-#define __dynamic_dbg_enabled(dd)  ({       \
-       int __ret = 0;                                                       \
-       if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) &&        \
-                       (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2))))   \
-                               if (unlikely(dd.flags))                      \
-                                       __ret = 1;                           \
-       __ret; })
-
 #define dynamic_pr_debug(fmt, ...) do {                                        \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,  \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               printk(KERN_DEBUG pr_fmt(fmt),  ##__VA_ARGS__);         \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       printk(KERN_DEBUG pr_fmt(fmt),  ##__VA_ARGS__);                 \
+out:   ;                                                               \
        } while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {                            \
+       __label__ do_printk;                                            \
+       __label__ out;                                                  \
        static struct _ddebug descriptor                                \
        __used                                                          \
        __attribute__((section("__verbose"), aligned(8))) =             \
-       { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH,  \
-               DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT };        \
-       if (__dynamic_dbg_enabled(descriptor))                          \
-               dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);        \
+       { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,            \
+               _DPRINTK_FLAGS_DEFAULT };                               \
+       JUMP_LABEL(&descriptor.enabled, do_printk);                     \
+       goto out;                                                       \
+do_printk:                                                             \
+       dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);                \
+out:   ;                                                               \
        } while (0)
 
 #else
index 7cf92e8a4196c0f6a38257cec095b1aa6d04939f..36c66443bdfd260def7dd6e9724f751dd17c7cc6 100644 (file)
@@ -13,6 +13,7 @@
 #define _LINUX_EDAC_H_
 
 #include <asm/atomic.h>
+#include <linux/sysdev.h>
 
 #define EDAC_OPSTATE_INVAL     -1
 #define EDAC_OPSTATE_POLL      0
 extern int edac_op_state;
 extern int edac_err_assert;
 extern atomic_t edac_handlers;
+extern struct sysdev_class edac_class;
 
 extern int edac_handler_set(void);
 extern void edac_atomic_assert_error(void);
+extern struct sysdev_class *edac_get_sysfs_class(void);
+extern void edac_put_sysfs_class(void);
 
 static inline void opstate_init(void)
 {
index 2c958f4fce1ed6f1d6f4a8c3fc865155cd1eac81..4fd978e7eb83ef8d689d0d313b5441b0631ea275 100644 (file)
@@ -93,6 +93,7 @@ struct elevator_queue
        struct elevator_type *elevator_type;
        struct mutex sysfs_lock;
        struct hlist_head *hash;
+       unsigned int registered:1;
 };
 
 /*
@@ -136,6 +137,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
index f59ed297b661fceb086a6ca188b54c3ff0006f85..133c0ba25e306a68199d399cf26fc15c242d347a 100644 (file)
@@ -31,7 +31,7 @@ struct embedded_fd_set {
 
 struct fdtable {
        unsigned int max_fds;
-       struct file ** fd;      /* current fd array */
+       struct file __rcu **fd;      /* current fd array */
        fd_set *close_on_exec;
        fd_set *open_fds;
        struct rcu_head rcu;
@@ -46,7 +46,7 @@ struct files_struct {
    * read mostly part
    */
        atomic_t count;
-       struct fdtable *fdt;
+       struct fdtable __rcu *fdt;
        struct fdtable fdtab;
   /*
    * written part on a separate cache line in SMP
@@ -55,7 +55,7 @@ struct files_struct {
        int next_fd;
        struct embedded_fd_set close_on_exec_init;
        struct embedded_fd_set open_fds_init;
-       struct file * fd_array[NR_OPEN_DEFAULT];
+       struct file __rcu * fd_array[NR_OPEN_DEFAULT];
 };
 
 #define rcu_dereference_check_fdtable(files, fdtfd) \
index 76041b6147582ef62eb0daedafbf1771a8e844c6..3168dcfb94f2bedfa02fe42b7345055d3bb154e2 100644 (file)
@@ -1093,6 +1093,10 @@ struct file_lock {
 
 #include <linux/fcntl.h>
 
+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 #ifdef CONFIG_FILE_LOCKING
@@ -1380,7 +1384,7 @@ struct super_block {
         * Saved mount options for lazy filesystems using
         * generic_show_options()
         */
-       char *s_options;
+       char __rcu *s_options;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
index 02b8b24f8f51f0e37156731ba94da19ba2d19131..8beabb958f61d5147c8893f1e780415a91fcb2e6 100644 (file)
@@ -191,8 +191,8 @@ struct ftrace_event_call {
        unsigned int            flags;
 
 #ifdef CONFIG_PERF_EVENTS
-       int                     perf_refcount;
-       struct hlist_head       *perf_events;
+       int                             perf_refcount;
+       struct hlist_head __percpu      *perf_events;
 #endif
 };
 
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
-extern int  perf_trace_enable(struct perf_event *event);
-extern void perf_trace_disable(struct perf_event *event);
+extern int  perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
index 5f2f4c4d8fb0594720bfc1643cec75fbf12aaa36..af3f06b41dc1e520a7729eb10e73f22b26ed8dff 100644 (file)
@@ -129,8 +129,8 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
-       struct hd_struct *last_lookup;
-       struct hd_struct *part[];
+       struct hd_struct __rcu *last_lookup;
+       struct hd_struct __rcu *part[];
 };
 
 struct gendisk {
@@ -149,7 +149,7 @@ struct gendisk {
         * non-critical accesses use RCU.  Always access through
         * helpers.
         */
-       struct disk_part_tbl *part_tbl;
+       struct disk_part_tbl __rcu *part_tbl;
        struct hd_struct part0;
 
        const struct block_device_operations *fops;
index 03f616b78cfa8b857e727ea220a60c05a87bb89f..e41f7dd1ae676eb778a00c0f7cdced047312eff7 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 
 struct device;
+struct gpio_chip;
 
 /*
  * Some platforms don't support the GPIO programming interface.
index d5b387669dab1f044406826847fdfa7646ab829d..96c323ac44dfea066a48641cf10b3093a1cfb63c 100644 (file)
@@ -64,6 +64,8 @@
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET     (1UL << NMI_SHIFT)
 
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
 #ifndef PREEMPT_ACTIVE
 #define PREEMPT_ACTIVE_BITS    1
 #define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
 /*
  * Are we doing bottom half or hardware interrupt processing?
  * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
  */
 #define in_irq()               (hardirq_count())
 #define in_softirq()           (softirq_count())
 #define in_interrupt()         (irq_count())
+#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
 
 /*
  * Are we in NMI context?
@@ -132,14 +137,16 @@ extern void synchronize_irq(unsigned int irq);
 
 struct task_struct;
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
 static inline void account_system_vtime(struct task_struct *tsk)
 {
 }
+#else
+extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
 #if defined(CONFIG_NO_HZ)
-#if defined(CONFIG_TINY_RCU)
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
 extern void rcu_enter_nohz(void);
 extern void rcu_exit_nohz(void);
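
The distinction the new macro enables: with this series, local_bh_disable() raises the preempt count by SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) while actually executing a softirq raises it by SOFTIRQ_OFFSET, so in_serving_softirq() sees only the latter. A hedged sketch, assuming process context:

    local_bh_disable();
    BUG_ON(!in_softirq());          /* true: bottom halves are disabled */
    BUG_ON(in_serving_softirq());   /* false: no softirq handler running */
    local_bh_enable();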
 
index ee3049cb9ba5782e27447d3984a914a2d39ae87b..52baa79d69a763f7f94f728b115fe0337190addd 100644 (file)
@@ -63,6 +63,9 @@
  *            IRQ lines will appear.  Similarly to gpio_base, the expander
  *            will create a block of irqs beginning at this number.
  *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
        unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
        u16      io_polarity;
        int      irq_summary;
        unsigned irq_base;
+       bool     reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
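
A board file opts in to the new probe-time reset through its platform data; a minimal sketch with made-up board values (gpio_base and the rest are illustrative only):

    static struct sx150x_platform_data board_sx150x_pdata = {
            .gpio_base          = 200,    /* hypothetical GPIO range start */
            .irq_summary        = -1,     /* summary IRQ not wired up */
            .reset_during_probe = true,   /* start from a known chip state */
    };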
index e968db71e33a94548160cb38c216289ab1577c30..cdb715e58e3e6de8b2e60485e31ce79ea99da8f4 100644 (file)
 
 struct idr_layer {
        unsigned long            bitmap; /* A zero bit means "space here" */
-       struct idr_layer        *ary[1<<IDR_BITS];
+       struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                      count;  /* When zero, we can release it */
        int                      layer;  /* distance from leaf */
        struct rcu_head          rcu_head;
 };
 
 struct idr {
-       struct idr_layer *top;
+       struct idr_layer __rcu *top;
        struct idr_layer *id_free;
        int               layers; /* only valid without concurrent changes */
        int               id_free_cnt;
index 1f43fa56f6001f821736e4b66e9fac5e6e0375ed..2fea6c8ef6babea0564ccf3b061698cacba1d14e 100644 (file)
@@ -82,11 +82,17 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET  CAP_FULL_SET
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_TREE_PREEMPT()                                   \
+       .rcu_blocked_node = NULL,
+#else
+#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
+#endif
+#ifdef CONFIG_PREEMPT_RCU
 #define INIT_TASK_RCU_PREEMPT(tsk)                                     \
        .rcu_read_lock_nesting = 0,                                     \
        .rcu_read_unlock_special = 0,                                   \
-       .rcu_blocked_node = NULL,                                       \
-       .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+       .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),           \
+       INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
@@ -137,8 +143,8 @@ extern struct cred init_cred;
        .children       = LIST_HEAD_INIT(tsk.children),                 \
        .sibling        = LIST_HEAD_INIT(tsk.sibling),                  \
        .group_leader   = &tsk,                                         \
-       .real_cred      = &init_cred,                                   \
-       .cred           = &init_cred,                                   \
+       RCU_INIT_POINTER(.real_cred, &init_cred),                       \
+       RCU_INIT_POINTER(.cred, &init_cred),                            \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(tsk.cred_guard_mutex),             \
        .comm           = "swapper",                                    \
index 896a92227bc429a9abdd27da5e3b0d77be122d95..d6ae1761be97fae4cbbc66602d19d9bdd60f3bdd 100644 (file)
@@ -1196,7 +1196,7 @@ struct input_dev {
        int (*flush)(struct input_dev *dev, struct file *file);
        int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
 
-       struct input_handle *grab;
+       struct input_handle __rcu *grab;
 
        spinlock_t event_lock;
        struct mutex mutex;
index a0384a4d1e6f4da4d39a02c0f8ba6634842dd236..531495db17081efabcb649a3740673f03a531e67 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -407,7 +408,12 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+static inline void __raise_softirq_irqoff(unsigned int nr)
+{
+       trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+       or_softirq_pending(1UL << nr);
+}
+
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 extern void wakeup_softirqd(void);
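
The inline keeps the existing contract of __raise_softirq_irqoff() (callers must already have interrupts disabled) while firing the new tracepoint on every raise. For reference, a sketch of the surrounding usage pattern; softirq slots are a fixed enum, so the slot below is purely illustrative:

    static void my_softirq_action(struct softirq_action *h)
    {
            /* runs in softirq context once raised */
    }

    /* boot-time registration (slot choice illustrative only) */
    open_softirq(TASKLET_SOFTIRQ, my_softirq_action);

    /* later, from IRQ context with interrupts disabled: */
    __raise_softirq_irqoff(TASKLET_SOFTIRQ);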
index 0a6b3d5c490ccfcd3ab9a3dbdba9b41f88e1c4b8..7fb59279373823339f6fdda86158952e3e6fac45 100644 (file)
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
        iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
        resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
        iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-       return (struct io_mapping *) ioremap_wc(base, size);
+       return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-       iounmap(mapping);
+       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
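
With the __iomem annotations, sparse now flags a plain dereference of the mapped pointer; access is meant to go through the MMIO accessors. A sketch (phys_base is a placeholder, the slot follows the pre-2.6.37 kmap_atomic convention, error handling elided):

    struct io_mapping *map;
    void __iomem *va;

    map = io_mapping_create_wc(phys_base, PAGE_SIZE);
    va = io_mapping_map_atomic_wc(map, 0, KM_USER0);
    iowrite32(0x1, va);                  /* not "*va = 1": va is __iomem */
    io_mapping_unmap_atomic(va, KM_USER0);
    io_mapping_free(map);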
 
index 64d5291330312ac718ba7f649e3d428063127f39..3e70b21884a948880f90e28684e5d5778e7a3d35 100644 (file)
@@ -53,7 +53,7 @@ struct io_context {
 
        struct radix_tree_root radix_root;
        struct hlist_head cic_list;
-       void *ioc_data;
+       void __rcu *ioc_data;
 };
 
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644 (file)
index 0000000..4fa09d4
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+struct irq_work {
+       struct irq_work *next;
+       void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+{
+       entry->next = NULL;
+       entry->func = func;
+}
+
+bool irq_work_queue(struct irq_work *entry);
+void irq_work_run(void);
+void irq_work_sync(struct irq_work *entry);
+
+#endif /* _LINUX_IRQ_WORK_H */
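
irq_work exists so that code running at NMI level (notably perf, see the perf_event.h changes below) can defer a callback until normal interrupts are possible again. A minimal usage sketch, callback name hypothetical:

    static void my_irq_work_func(struct irq_work *work)
    {
            /* runs with interrupts enabled, soon after being queued */
    }

    static struct irq_work my_work;

    init_irq_work(&my_work, my_irq_work_func);
    irq_work_queue(&my_work);   /* safe from NMI; returns false if the
                                   entry was already pending */
    irq_work_sync(&my_work);    /* process context: wait for completion */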
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644 (file)
index 0000000..b67cb18
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
+# include <asm/jump_label.h>
+# define HAVE_JUMP_LABEL
+#endif
+
+enum jump_label_type {
+       JUMP_LABEL_ENABLE,
+       JUMP_LABEL_DISABLE
+};
+
+struct module;
+
+#ifdef HAVE_JUMP_LABEL
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void arch_jump_label_transform(struct jump_entry *entry,
+                                enum jump_label_type type);
+extern void arch_jump_label_text_poke_early(jump_label_t addr);
+extern void jump_label_update(unsigned long key, enum jump_label_type type);
+extern void jump_label_apply_nops(struct module *mod);
+extern int jump_label_text_reserved(void *start, void *end);
+
+#define jump_label_enable(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+
+#define jump_label_disable(key) \
+       jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+
+#else
+
+#define JUMP_LABEL(key, label)                 \
+do {                                           \
+       if (unlikely(*key))                     \
+               goto label;                     \
+} while (0)
+
+#define jump_label_enable(cond_var)    \
+do {                                   \
+       *(cond_var) = 1;                        \
+} while (0)
+
+#define jump_label_disable(cond_var)   \
+do {                                   \
+       *(cond_var) = 0;                        \
+} while (0)
+
+static inline int jump_label_apply_nops(struct module *mod)
+{
+       return 0;
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+       return 0;
+}
+
+#endif
+
+#define COND_STMT(key, stmt)                                   \
+do {                                                           \
+       __label__ jl_enabled;                                   \
+       JUMP_LABEL(key, jl_enabled);                            \
+       if (0) {                                                \
+jl_enabled:                                                    \
+               stmt;                                           \
+       }                                                       \
+} while (0)
+
+#endif
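
When the toolchain lacks asm goto, the generic fallback compiles JUMP_LABEL() to an ordinary test of *key, and jump_label_enable()/jump_label_disable() simply write the flag. A sketch of the fallback semantics (my_key is illustrative):

    static int my_key;   /* generic variant: a plain flag, no code patching */

    jump_label_enable(&my_key);                         /* *key = 1 */
    COND_STMT(&my_key, printk(KERN_INFO "feature on\n")); /* runs only if set */
    jump_label_disable(&my_key);                        /* *key = 0 */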
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
new file mode 100644 (file)
index 0000000..e5d012a
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef _LINUX_JUMP_LABEL_REF_H
+#define _LINUX_JUMP_LABEL_REF_H
+
+#include <linux/jump_label.h>
+#include <asm/atomic.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static inline void jump_label_inc(atomic_t *key)
+{
+       if (atomic_add_return(1, key) == 1)
+               jump_label_enable(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+       if (atomic_dec_and_test(key))
+               jump_label_disable(key);
+}
+
+#else /* !HAVE_JUMP_LABEL */
+
+static inline void jump_label_inc(atomic_t *key)
+{
+       atomic_inc(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+       atomic_dec(key);
+}
+
+#undef JUMP_LABEL
+#define JUMP_LABEL(key, label)                                         \
+do {                                                                   \
+       if (unlikely(__builtin_choose_expr(                             \
+             __builtin_types_compatible_p(typeof(key), atomic_t *),    \
+             atomic_read((atomic_t *)(key)), *(key))))                 \
+               goto label;                                             \
+} while (0)
+
+#endif /* HAVE_JUMP_LABEL */
+
+#endif /* _LINUX_JUMP_LABEL_REF_H */
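
The _ref variant layers a reference count on top, so only the 0->1 and 1->0 transitions flip the branch; perf uses this for perf_task_events later in this diff. A sketch:

    static atomic_t my_refcount = ATOMIC_INIT(0);

    jump_label_inc(&my_refcount);   /* first user enables the branch */
    jump_label_inc(&my_refcount);   /* later users just bump the count */
    jump_label_dec(&my_refcount);
    jump_label_dec(&my_refcount);   /* last user disables it again */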
index 2b0a35e6bc691896609944c114328b414be37a12..1759ba5adce845fa08d4b1fd899920ac65436938 100644 (file)
@@ -58,7 +58,18 @@ extern const char linux_proc_banner[];
 
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define roundup(x, y) (                                        \
+{                                                      \
+       typeof(y) __y = y;                              \
+       (((x) + (__y - 1)) / __y) * __y;                \
+}                                                      \
+)
+#define rounddown(x, y) (                              \
+{                                                      \
+       typeof(x) __x = (x);                            \
+       __x - (__x % (y));                              \
+}                                                      \
+)
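
The statement-expression forms evaluate each argument exactly once (so side effects are safe) without changing the arithmetic. For example:

    roundup(10, 4);     /* ((10 + 3) / 4) * 4 == 12 */
    rounddown(10, 4);   /* 10 - (10 % 4)      ==  8 */
    roundup(12, 4);     /* already a multiple == 12 */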
 #define DIV_ROUND_CLOSEST(x, divisor)(                 \
 {                                                      \
        typeof(divisor) __divisor = divisor;            \
index cd50dfa1d4c224de2a26e2d8ef3926d7fc74435e..3db0adce1fdabd00d034ad2e111b9d3411146dfd 100644 (file)
@@ -178,8 +178,9 @@ struct key {
         */
        union {
                unsigned long           value;
+               void __rcu              *rcudata;
                void                    *data;
-               struct keyring_list     *subscriptions;
+               struct keyring_list __rcu *subscriptions;
        } payload;
 };
 
index 4aa95f203f3ee773a6ab4bbdbb632efaf970785e..62dbee554f608c91fe7b2b3bf01f390260821c52 100644 (file)
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)  \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpl = (fifo); \
+       typeof((fifo) + 1) __tmpl = (fifo); \
        __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_empty(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_full(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define        kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
        unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
        (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_skip(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__is_kfifo_ptr(__tmp)) \
                __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_put(fifo, val) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_in(fifo, buf, n) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo);  \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
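
All of the kfifo changes above are the same macro-hygiene fix: parenthesizing the fifo (and val/buf) parameters inside typeof() so that any pointer-valued expression parses as intended. Usage is unchanged; for reference, a sketch against this era's API, where put/get take a pointer to the element:

    DEFINE_KFIFO(my_fifo, int, 16);   /* capacity must be a power of two */
    int in = 42, out;

    kfifo_put(&my_fifo, &in);         /* returns 0 when the fifo is full */
    if (kfifo_get(&my_fifo, &out))    /* returns number of elements read */
            printk(KERN_INFO "%d\n", out);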
index 74d691ee9121c5bb3aa8336d7eeffcc03d88cc46..3319a6967626e02f91c340080b21950a8310a67f 100644 (file)
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = page_anon_vma(page);
 
-       if (!anon_vma ||
-           (anon_vma->root == vma->anon_vma->root &&
-            page->index == linear_page_index(vma, address)))
-               return page;
-
-       return ksm_does_need_to_copy(page, vma, address);
+       return anon_vma &&
+               (anon_vma->root != vma->anon_vma->root ||
+                page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
-       return page;
+       return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
index c13cc48697aa73d2f8daa55dcbaf94204ff3db38..ac740b26eb1071950a26ce64ddca68f8212e8742 100644 (file)
@@ -205,7 +205,7 @@ struct kvm {
 
        struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-       struct kvm_irq_routing_table *irq_routing;
+       struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
 #endif
index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644 (file)
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
index f010f18a0f863f39e139d65469debae94232b259..45fb2967b66d6949094289a467ae120d12014c0d 100644 (file)
@@ -335,6 +335,7 @@ enum {
        ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
        ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
        ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
+       ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
        ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
        ATA_EHI_DID_HARDRESET   = (1 << 17), /* already hard-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
        struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */
        u8                      ctl;    /* cache of ATA control register */
        u8                      last_ctl;       /* Cache last written value */
+       struct ata_link*        sff_pio_task_link; /* link currently used */
        struct delayed_work     sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
        struct ata_bmdma_prd    *bmdma_prd;     /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                            u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
index 06aed8305bf3bc38061dc42c168c3b9d83f29492..2186a64ee4b568f09649347b5bf04e5ed7a66cdb 100644 (file)
@@ -31,6 +31,17 @@ extern int lock_stat;
 
 #define MAX_LOCKDEP_SUBCLASSES         8UL
 
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single-depth
+ * subclass are cached in lockdep_map. This optimization mainly
+ * targets rq->lock: double_rq_lock() acquires this highly contended
+ * lock with a single-depth subclass.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES     2
+
 /*
  * Lock-classes are keyed via unique addresses, by embedding the
  * lockclass-key into the kernel (or module) .data section. (For
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class);
  */
 struct lockdep_map {
        struct lock_class_key           *key;
-       struct lock_class               *class_cache;
+       struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
 #ifdef CONFIG_LOCK_STAT
        int                             cpu;
index e6b1210772ceace3fc70817a32e1a411096a6b22..74949fbef8c608b9c5ef6dab3b508041a11243f3 100644 (file)
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
index ee7e258627f9f5b9999e4c3033f7fa78c5927b48..cb57d657ce4d2643c58e21eb23b2b1434f0f7a2f 100644 (file)
@@ -299,7 +299,7 @@ struct mm_struct {
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
-       struct task_struct *owner;
+       struct task_struct __rcu *owner;
 #endif
 
 #ifdef CONFIG_PROC_FS
index 329a8faa6e37bb32bd6e65f1b8fd758ac45baa36..245cdacee5443791eb5418d10bde3daa190351a9 100644 (file)
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
index 6e6e62648a4d4a6d792fe207d42105563b112aaa..3984c4eb41fdc9c85759dbffe308df5a806391f4 100644 (file)
@@ -283,6 +283,13 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
+       /*
+        * When free pages are below this point, additional steps are taken
+        * when reading the number of free pages, to avoid per-cpu counter
+        * drift that could allow the watermarks to be breached.
+        */
+       unsigned long percpu_drift_mark;
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
index 8a6b9fdc7ffae0c20335b8a2c73726396a934ee2..b29e7458b96642b2f36162aa755b10ff76b15abe 100644 (file)
@@ -350,7 +350,10 @@ struct module
        struct tracepoint *tracepoints;
        unsigned int num_tracepoints;
 #endif
-
+#ifdef HAVE_JUMP_LABEL
+       struct jump_entry *jump_entries;
+       unsigned int num_jump_entries;
+#endif
 #ifdef CONFIG_TRACING
        const char **trace_bprintk_fmt_start;
        unsigned int num_trace_bprintk_fmt;
@@ -686,17 +689,16 @@ extern int module_sysfs_initialized;
 
 
 #ifdef CONFIG_GENERIC_BUG
-int  module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
                         struct module *);
 void module_bug_cleanup(struct module *);
 
 #else  /* !CONFIG_GENERIC_BUG */
 
-static inline int  module_bug_finalize(const Elf_Ehdr *hdr,
+static inline void module_bug_finalize(const Elf_Ehdr *hdr,
                                        const Elf_Shdr *sechdrs,
                                        struct module *mod)
 {
-       return 0;
 }
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif /* CONFIG_GENERIC_BUG */
index 878cab4f5fcc5db95585184c22aea5d905a34295..f363bc8fdc74c821c99aa59d5bfcb9554c012c9a 100644 (file)
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                   \
        static struct lock_class_key __key;             \
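
The new kerneldoc documents behaviour callers already rely on; for reference, the documented pattern as a sketch:

    struct my_ctx {
            struct mutex lock;
    };

    static void my_ctx_setup(struct my_ctx *ctx)
    {
            mutex_init(&ctx->lock);   /* never call on an already locked mutex */
    }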
index 9ed534c991b9312d84876c9162ed63e5ef2b5c66..70cd0603911c97b865bc576a3f4490f523e2fc08 100644 (file)
@@ -39,8 +39,9 @@ enum ctattr_type {
        CTA_TUPLE_MASTER,
        CTA_NAT_SEQ_ADJ_ORIG,
        CTA_NAT_SEQ_ADJ_REPLY,
-       CTA_SECMARK,
+       CTA_SECMARK,            /* obsolete */
        CTA_ZONE,
+       CTA_SECCTX,
        __CTA_MAX
 };
 #define CTA_MAX (__CTA_MAX - 1)
@@ -172,4 +173,11 @@ enum ctattr_help {
 };
 #define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
 
+enum ctattr_secctx {
+       CTA_SECCTX_UNSPEC,
+       CTA_SECCTX_NAME,
+       __CTA_SECCTX_MAX
+};
+#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
+
 #endif /* _IPCONNTRACK_NETLINK_H */
index 6fcd3448b18631f04e081cde470f85218dd7f9b8..989092bd6274b44585ccc0faf4f005a0d7b909b7 100644 (file)
  * packets are being marked for.
  */
 #define SECMARK_MODE_SEL       0x01            /* SELinux */
-#define SECMARK_SELCTX_MAX     256
-
-struct xt_secmark_target_selinux_info {
-       __u32 selsid;
-       char selctx[SECMARK_SELCTX_MAX];
-};
+#define SECMARK_SECCTX_MAX     256
 
 struct xt_secmark_target_info {
        __u8 mode;
-       union {
-               struct xt_secmark_target_selinux_info sel;
-       } u;
+       __u32 secid;
+       char secctx[SECMARK_SECCTX_MAX];
 };
 
 #endif /*_XT_SECMARK_H_target */
index 59d066936ab9e0414d2cddc5123a9b7e0ac3c3ad..123566912d7312f276bf975cd1a48c97b2ffa830 100644 (file)
@@ -27,8 +27,6 @@
 
 #define MAX_LINKS 32           
 
-struct net;
-
 struct sockaddr_nl {
        sa_family_t     nl_family;      /* AF_NETLINK   */
        unsigned short  nl_pad;         /* zero         */
@@ -151,6 +149,8 @@ struct nlattr {
 #include <linux/capability.h>
 #include <linux/skbuff.h>
 
+struct net;
+
 static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 {
        return (struct nlmsghdr *)skb->data;
index 791d5109f34c12207de65f06cca05fa4b35b44b8..50d8009be86c023cbee9607cb728927ba86fb531 100644 (file)
@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
        unsigned long flags;
        bool ret = false;
 
-       rcu_read_lock_bh();
+       local_irq_save(flags);
        npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
        if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
                goto out;
 
-       spin_lock_irqsave(&npinfo->rx_lock, flags);
+       spin_lock(&npinfo->rx_lock);
        /* check rx_flags again with the lock held */
        if (npinfo->rx_flags && __netpoll_rx(skb))
                ret = true;
-       spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+       spin_unlock(&npinfo->rx_lock);
 
 out:
-       rcu_read_unlock_bh();
+       local_irq_restore(flags);
        return ret;
 }
 
index 508f8cf6da379bc7179b5c8f22d534a63a539ad0..d0edf7d823ae3ef60ec2ba6c6daa3ff0b34643cd 100644 (file)
@@ -185,7 +185,7 @@ struct nfs_inode {
        struct nfs4_cached_acl  *nfs4_acl;
         /* NFSv4 state */
        struct list_head        open_states;
-       struct nfs_delegation   *delegation;
+       struct nfs_delegation __rcu *delegation;
        fmode_t                  delegation_state;
        struct rw_semaphore     rwsem;
 #endif /* CONFIG_NFS_V4*/
index b2f1a4d835506b7d0d8fdce8831d608fa28759bd..2026f9e1ceb8e5cb5d6a9f3a2a1b4c4ff02d5953 100644 (file)
 
 struct notifier_block {
        int (*notifier_call)(struct notifier_block *, unsigned long, void *);
-       struct notifier_block *next;
+       struct notifier_block __rcu *next;
        int priority;
 };
 
 struct atomic_notifier_head {
        spinlock_t lock;
-       struct notifier_block *head;
+       struct notifier_block __rcu *head;
 };
 
 struct blocking_notifier_head {
        struct rw_semaphore rwsem;
-       struct notifier_block *head;
+       struct notifier_block __rcu *head;
 };
 
 struct raw_notifier_head {
-       struct notifier_block *head;
+       struct notifier_block __rcu *head;
 };
 
 struct srcu_notifier_head {
        struct mutex mutex;
        struct srcu_struct srcu;
-       struct notifier_block *head;
+       struct notifier_block __rcu *head;
 };
 
 #define ATOMIC_INIT_NOTIFIER_HEAD(name) do {   \
index 5171639ecf0ff070e74d884deb8c969e3b88132d..32fb81212fd153c0a71ba347ead6429d0a8dcd06 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/atomic.h>
  
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
+#ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* OPROFILE_H */
index 10d33309e9a61351aa4d3597540f201c97dc9294..2615c37c8fe507c0126174dfa8572e6b5e78651c 100644 (file)
 #define PCI_DEVICE_ID_VLSI_82C147      0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011    0x0702
 
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU      0x5a23
+
 #define PCI_VENDOR_ID_ADL              0x1005
 #define PCI_DEVICE_ID_ADL_2301         0x2301
 
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_MISC  0x1603
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
 #define PCI_DEVICE_ID_AMD_SCSI         0x2020
index ce2dc655cd1d40a6765acc2dd3f49e104bca4f5c..27ef6b190ea6cc5ee84c53dbcd9a38f5c09164d6 100644 (file)
        DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
        __aligned(PAGE_SIZE)
 
+/*
+ * Declaration/definition used for per-CPU variables that must be read mostly.
+ */
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name)                        \
+       DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+
+#define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
+       DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
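
These mirror the existing per-CPU section variants: a read-mostly per-CPU variable is declared in a header and defined once, keeping it off cache lines that see frequent writes. A sketch with hypothetical names:

    /* in a header */
    DECLARE_PER_CPU_READ_MOSTLY(unsigned int, my_cpu_features);

    /* in exactly one .c file */
    DEFINE_PER_CPU_READ_MOSTLY(unsigned int, my_cpu_features);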
+
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
index 49466b13c5c6b310f36b31c052573864df6f02a0..0eb50832aa00fd1bbc31cc23837abfc698412402 100644 (file)
        preempt_enable();                               \
 } while (0)
 
+#define get_cpu_ptr(var) ({                            \
+       preempt_disable();                              \
+       this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do {                          \
+       (void)(var);                                    \
+       preempt_enable();                               \
+} while (0)
+
 #ifdef CONFIG_SMP
 
 /* minimum unit size, also is the maximum supported allocation size */
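
get_cpu_ptr()/put_cpu_ptr() pair preemption control with this_cpu_ptr(), the dynamic-allocation analogue of get_cpu_var()/put_cpu_var(). A sketch (allocation failure handling elided):

    struct my_stats { unsigned long count; };
    struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
    struct my_stats *s;

    s = get_cpu_ptr(stats);   /* disables preemption, returns this CPU's copy */
    s->count++;
    put_cpu_ptr(stats);       /* re-enables preemption */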
index 716f99b682c1a57fb3b6f1f72e90aec3982ca5fd..057bf22a8323463a6bd9279d6928d3fea7ed8eb8 100644 (file)
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -529,16 +531,22 @@ struct hw_perf_event {
                        int             last_cpu;
                };
                struct { /* software */
-                       s64             remaining;
                        struct hrtimer  hrtimer;
                };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
+                       /*
+                        * Crufty hack to avoid the chicken and egg
+                        * problem hw_breakpoint has with context
+                        * creation and event initialization.
+                        */
+                       struct task_struct              *bp_target;
                };
 #endif
        };
+       int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
@@ -550,6 +558,13 @@ struct hw_perf_event {
 #endif
 };
 
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED       0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE      0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH          0x04
+
 struct perf_event;
 
 /*
@@ -561,36 +576,70 @@ struct perf_event;
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-       int (*enable)                   (struct perf_event *event);
-       void (*disable)                 (struct perf_event *event);
-       int (*start)                    (struct perf_event *event);
-       void (*stop)                    (struct perf_event *event);
-       void (*read)                    (struct perf_event *event);
-       void (*unthrottle)              (struct perf_event *event);
+       struct list_head                entry;
+
+       int * __percpu                  pmu_disable_count;
+       struct perf_cpu_context * __percpu pmu_cpu_context;
+       int                             task_ctx_nr;
+
+       /*
+        * Fully disable/enable this PMU; can be used to protect from the PMI
+        * as well as for lazy/batch writing of the MSRs.
+        */
+       void (*pmu_enable)              (struct pmu *pmu); /* optional */
+       void (*pmu_disable)             (struct pmu *pmu); /* optional */
 
        /*
-        * Group events scheduling is treated as a transaction, add group
-        * events as a whole and perform one schedulability test. If the test
-        * fails, roll back the whole group
+        * Try and initialize the event for this PMU.
+        * Should return -ENOENT when the @event doesn't match this PMU.
         */
+       int (*event_init)               (struct perf_event *event);
+
+#define PERF_EF_START  0x01            /* start the counter when adding    */
+#define PERF_EF_RELOAD 0x02            /* reload the counter when starting */
+#define PERF_EF_UPDATE 0x04            /* update the counter when stopping */
 
        /*
-        * Start the transaction, after this ->enable() doesn't need
-        * to do schedulability tests.
+        * Adds/Removes a counter to/from the PMU, can be done inside
+        * a transaction, see the ->*_txn() methods.
         */
-       void (*start_txn)       (const struct pmu *pmu);
+       int  (*add)                     (struct perf_event *event, int flags);
+       void (*del)                     (struct perf_event *event, int flags);
+
        /*
-        * If ->start_txn() disabled the ->enable() schedulability test
+        * Starts/Stops a counter present on the PMU. The PMI handler
+        * should stop the counter when perf_event_overflow() returns
+        * !0. ->start() will be used to continue.
+        */
+       void (*start)                   (struct perf_event *event, int flags);
+       void (*stop)                    (struct perf_event *event, int flags);
+
+       /*
+        * Updates the counter value of the event.
+        */
+       void (*read)                    (struct perf_event *event);
+
+       /*
+        * Group events scheduling is treated as a transaction, add
+        * group events as a whole and perform one schedulability test.
+        * If the test fails, roll back the whole group
+        *
+        * Start the transaction, after this ->add() doesn't need to
+        * do schedulability tests.
+        */
+       void (*start_txn)       (struct pmu *pmu); /* optional */
+       /*
+        * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
-       int  (*commit_txn)      (const struct pmu *pmu);
+       int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
-        * Will cancel the transaction, assumes ->disable() is called for
-        * each successfull ->enable() during the transaction.
+        * Will cancel the transaction, assumes ->del() is called
+        * for each successful ->add() during the transaction.
         */
-       void (*cancel_txn)      (const struct pmu *pmu);
+       void (*cancel_txn)      (struct pmu *pmu); /* optional */
 };
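
A heavily simplified sketch of what a PMU implements under the reworked interface (all names hypothetical, error handling elided; not taken from this patch):

    static void my_read(struct perf_event *event)
    {
            /* refresh event->count here */
    }

    static void my_start(struct perf_event *event, int flags)
    {
            event->hw.state = 0;                        /* counting again */
    }

    static void my_stop(struct perf_event *event, int flags)
    {
            event->hw.state |= PERF_HES_STOPPED;
            if (flags & PERF_EF_UPDATE)
                    event->hw.state |= PERF_HES_UPTODATE; /* count synced */
    }

    static int my_add(struct perf_event *event, int flags)
    {
            event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
            if (flags & PERF_EF_START)
                    my_start(event, PERF_EF_RELOAD);
            return 0;
    }

    static void my_del(struct perf_event *event, int flags)
    {
            my_stop(event, PERF_EF_UPDATE);
    }

    static int my_event_init(struct perf_event *event)
    {
            if (event->attr.type != PERF_TYPE_SOFTWARE)
                    return -ENOENT;   /* not ours: core tries the next pmu */
            return 0;
    }

    static struct pmu my_pmu = {
            .event_init = my_event_init,
            .add        = my_add,
            .del        = my_del,
            .start      = my_start,
            .stop       = my_stop,
            .read       = my_read,
    };

    /* registered via perf_pmu_register(&my_pmu); */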
 
 /**
@@ -631,11 +680,6 @@ struct perf_buffer {
        void                            *data_pages[0];
 };
 
-struct perf_pending_entry {
-       struct perf_pending_entry *next;
-       void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -656,6 +700,7 @@ struct swevent_hlist {
 
 #define PERF_ATTACH_CONTEXT    0x01
 #define PERF_ATTACH_GROUP      0x02
+#define PERF_ATTACH_TASK       0x04
 
 /**
  * struct perf_event - performance event kernel representation:
@@ -669,7 +714,7 @@ struct perf_event {
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
-       const struct pmu                *pmu;
+       struct pmu                      *pmu;
 
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
@@ -743,7 +788,7 @@ struct perf_event {
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
-       struct perf_pending_entry       pending;
+       struct irq_work                 pending;
 
        atomic_t                        event_limit;
 
@@ -763,12 +808,19 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
+enum perf_event_context_type {
+       task_context,
+       cpu_context,
+};
+
 /**
  * struct perf_event_context - event context structure
  *
  * Used as a container for task events and CPU events as well:
  */
 struct perf_event_context {
+       enum perf_event_context_type    type;
+       struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -808,6 +860,12 @@ struct perf_event_context {
        struct rcu_head                 rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *     task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS       4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -815,18 +873,9 @@ struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
-       int                             max_pertask;
        int                             exclusive;
-       struct swevent_hlist            *swevent_hlist;
-       struct mutex                    hlist_mutex;
-       int                             hlist_refcount;
-
-       /*
-        * Recursion avoidance:
-        *
-        * task, softirq, irq, nmi context
-        */
-       int                             recursion[4];
+       struct list_head                rotation_list;
+       int                             jiffies_interval;
 };
 
 struct perf_output_handle {
@@ -842,26 +891,34 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
+extern int perf_pmu_register(struct pmu *pmu);
+extern void perf_pmu_unregister(struct pmu *pmu);
+
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+       COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+       COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
 
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
+extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -869,7 +926,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
-                               pid_t pid,
+                               struct task_struct *task,
                                perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
@@ -920,14 +977,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-       switch (event->attr.type) {
-       case PERF_TYPE_SOFTWARE:
-       case PERF_TYPE_TRACEPOINT:
-       /* for now the breakpoint stuff also works as software event */
-       case PERF_TYPE_BREAKPOINT:
-               return 1;
-       }
-       return 0;
+       return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -954,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-       if (atomic_read(&perf_swevent_enabled[event_id])) {
-               struct pt_regs hot_regs;
-
-               if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs);
-                       regs = &hot_regs;
-               }
-               __perf_sw_event(event_id, nr, nmi, regs, addr);
+       struct pt_regs hot_regs;
+
+       JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
+       return;
+
+have_event:
+       if (!regs) {
+               perf_fetch_caller_regs(&hot_regs);
+               regs = &hot_regs;
        }
+       __perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -976,7 +1028,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs);
+
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
 
 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
@@ -1019,21 +1085,18 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task)                     { }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
-static inline void
-perf_event_task_tick(struct task_struct *task)                         { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
-static inline void perf_event_do_pending(void)                         { }
+static inline void perf_event_delayed_put(struct task_struct *task)    { }
 static inline void perf_event_print_debug(void)                                { }
-static inline void perf_disable(void)                                  { }
-static inline void perf_enable(void)                                   { }
 static inline int perf_event_task_disable(void)                                { return -EINVAL; }
 static inline int perf_event_task_enable(void)                         { return -EINVAL; }
 
@@ -1056,6 +1119,7 @@ static inline int  perf_swevent_get_recursion_context(void)               { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)                { }
 static inline void perf_event_enable(struct perf_event *event)         { }
 static inline void perf_event_disable(struct perf_event *event)                { }
+static inline void perf_event_task_tick(void)                          { }
 #endif
 
 #define perf_output_put(handle, x) \
index d50ba858cfe0c15325ff5fcf410d7a18f1a8aad3..d1a9193960f17601d4637bfae03f3c9a3ac5bc83 100644 (file)
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
        int ret;
 
        ret = dquot_alloc_space_nodirty(inode, nr);
-       if (!ret)
-               mark_inode_dirty_sync(inode);
+       if (!ret) {
+               /*
+                * Mark the inode fully dirty. Since we are allocating blocks,
+                * the inode would become fully dirty soon anyway, and doing so
+                * here reportedly reduces inode_lock contention.
+                */
+               mark_inode_dirty(inode);
+       }
        return ret;
 }
 
index 634b8e674ac578e2916b110a28b992bf2e8dfd22..a39cbed9ee17a5d771f7c3e7ca129e3a05171220 100644 (file)
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr)
 {
        return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
 }
+#define radix_tree_indirect_to_ptr(ptr) \
+       radix_tree_indirect_to_ptr((void __force *)(ptr))
 
 static inline int radix_tree_is_indirect_ptr(void *ptr)
 {
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
 struct radix_tree_root {
        unsigned int            height;
        gfp_t                   gfp_mask;
-       struct radix_tree_node  *rnode;
+       struct radix_tree_node  __rcu *rnode;
 };
 
 #define RADIX_TREE_INIT(mask)  {                                       \
index 4ec3b38ce9c584049229b71bbf537770d6fbe263..f31ef61f1c650b585bd6faf969f7cec754dffe2d 100644 (file)
@@ -9,6 +9,21 @@
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 
+/*
+ * Why is there no list_empty_rcu()?  Because list_empty() serves this
+ * purpose.  The list_empty() function fetches the RCU-protected pointer
+ * and compares it to the address of the list head, but neither dereferences
+ * this pointer itself nor provides this pointer to the caller.  Therefore,
+ * it is not necessary to use rcu_dereference(), and list_empty() can
+ * therefore be used anywhere you would want to use a list_empty_rcu().
+ */
+
+/*
+ * Return the ->next pointer of a list_head in an RCU-safe
+ * way; we must not access it directly.
+ */
+#define list_next_rcu(list)    (*((struct list_head __rcu **)(&(list)->next)))
+
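
As a minimal sketch of the point made above: a reader may test emptiness with plain list_empty(), with no RCU primitive, because no pointer is ever dereferenced (mydev and its handlers list are hypothetical):

    rcu_read_lock();
    if (!list_empty(&mydev->handlers))  /* compares, never dereferences */
            pr_debug("handlers are registered\n");
    rcu_read_unlock();
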
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new,
 {
        new->next = next;
        new->prev = prev;
-       rcu_assign_pointer(prev->next, new);
+       rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
 }
 
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old,
 {
        new->next = old->next;
        new->prev = old->prev;
-       rcu_assign_pointer(new->prev->next, new);
+       rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
 }
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
         */
 
        last->next = at;
-       rcu_assign_pointer(head->next, first);
+       rcu_assign_pointer(list_next_rcu(head), first);
        first->prev = head;
        at->prev = last;
 }
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-       container_of(rcu_dereference_raw(ptr), type, member)
+       ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
+        container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+       })
 
 /**
  * list_first_entry_rcu - get the first element from a list
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
        list_entry_rcu((ptr)->next, type, member)
 
 #define __list_for_each_rcu(pos, head) \
-       for (pos = rcu_dereference_raw((head)->next); \
+       for (pos = rcu_dereference_raw(list_next_rcu(head)); \
                pos != (head); \
-               pos = rcu_dereference_raw(pos->next))
+               pos = rcu_dereference_raw(list_next_rcu((pos)))
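
For reference, a typical reader traversal built on these accessors looks like the following sketch (struct foo, its list member, foo_head, and do_something() are hypothetical):

    struct foo *pos;

    rcu_read_lock();
    list_for_each_entry_rcu(pos, &foo_head, list)
            do_something(pos);  /* pos is safe to dereference here */
    rcu_read_unlock();
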
 
 /**
  * list_for_each_entry_rcu     -       iterate over rcu list of given type
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * as long as the traversal is guarded by rcu_read_lock().
  */
 #define list_for_each_continue_rcu(pos, head) \
-       for ((pos) = rcu_dereference_raw((pos)->next); \
+       for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
                prefetch((pos)->next), (pos) != (head); \
-               (pos) = rcu_dereference_raw((pos)->next))
+               (pos) = rcu_dereference_raw(list_next_rcu(pos)))
 
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
 
        new->next = next;
        new->pprev = old->pprev;
-       rcu_assign_pointer(*new->pprev, new);
+       rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
 }
 
+/*
+ * Return the first or the next element in an RCU-protected hlist.
+ */
+#define hlist_first_rcu(head)  (*((struct hlist_node __rcu **)(&(head)->first)))
+#define hlist_next_rcu(node)   (*((struct hlist_node __rcu **)(&(node)->next)))
+#define hlist_pprev_rcu(node)  (*((struct hlist_node __rcu **)((node)->pprev)))
+
 /**
  * hlist_add_head_rcu
  * @n: the element to add to the hash list.
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
 
        n->next = first;
        n->pprev = &h->first;
-       rcu_assign_pointer(h->first, n);
+       rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                first->pprev = &n->next;
 }
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
 {
        n->pprev = next->pprev;
        n->next = next;
-       rcu_assign_pointer(*(n->pprev), n);
+       rcu_assign_pointer(hlist_pprev_rcu(n), n);
        next->pprev = &n->next;
 }
 
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 {
        n->next = prev->next;
        n->pprev = &prev->next;
-       rcu_assign_pointer(prev->next, n);
+       rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                n->next->pprev = &n->next;
 }
 
-#define __hlist_for_each_rcu(pos, head)                        \
-       for (pos = rcu_dereference((head)->first);      \
-            pos && ({ prefetch(pos->next); 1; });      \
-            pos = rcu_dereference(pos->next))
+#define __hlist_for_each_rcu(pos, head)                                \
+       for (pos = rcu_dereference(hlist_first_rcu(head));      \
+            pos && ({ prefetch(pos->next); 1; });              \
+            pos = rcu_dereference(hlist_next_rcu(pos)))
 
 /**
  * hlist_for_each_entry_rcu - iterate over rcu list of given type
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member)               \
-       for (pos = rcu_dereference_raw((head)->first);                   \
+#define hlist_for_each_entry_rcu(tpos, pos, head, member)              \
+       for (pos = rcu_dereference_raw(hlist_first_rcu(head));          \
                pos && ({ prefetch(pos->next); 1; }) &&                  \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-               pos = rcu_dereference_raw(pos->next))
+               pos = rcu_dereference_raw(hlist_next_rcu(pos)))
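
A hash-bucket lookup using this iterator might look like the sketch below (the table, key, and struct names are hypothetical; note the separate pos cursor that this era's API requires, and that tpos must only be used before rcu_read_unlock()):

    struct obj *tpos;
    struct hlist_node *pos;

    rcu_read_lock();
    hlist_for_each_entry_rcu(tpos, pos, &hash_table[bkt], hnode)
            if (tpos->key == key)
                    break;      /* use tpos while still under RCU */
    rcu_read_unlock();
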
 
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
index b70ffe53cb9fe77a668f57c2bc584216f90f95b6..2ae13714828bc42568e77684fedfc2cf929c291d 100644 (file)
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
        }
 }
 
+#define hlist_nulls_first_rcu(head) \
+       (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+
+#define hlist_nulls_next_rcu(node) \
+       (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
+
 /**
  * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
  * @n: the element to delete from the hash list.
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 
        n->next = first;
        n->pprev = &h->first;
-       rcu_assign_pointer(h->first, n);
+       rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
        if (!is_a_nulls(first))
                first->pprev = &n->next;
 }
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  * @member:    the name of the hlist_nulls_node within the struct.
  *
  */
-#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
-       for (pos = rcu_dereference_raw((head)->first);                   \
-               (!is_a_nulls(pos)) &&                   \
+#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)                        \
+       for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
+               (!is_a_nulls(pos)) &&                                           \
                ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
-               pos = rcu_dereference_raw(pos->next))
+               pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
 
 #endif
 #endif
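
The nulls variant exists so that lookups over SLAB_DESTROY_BY_RCU objects can detect an object that was freed and reused on another chain mid-walk. A hedged sketch of the usual pattern, assuming each chain's nulls value was initialized to its slot number (table, key, and member names hypothetical):

    struct obj *tpos;
    struct hlist_nulls_node *pos;

    rcu_read_lock();
begin:
    hlist_nulls_for_each_entry_rcu(tpos, pos, &table[slot], hnode)
            if (tpos->key == key)
                    goto found;
    /*
     * If the nulls value does not match our slot, the walk ended on
     * a different chain than it started on: restart the lookup.
     */
    if (get_nulls_value(pos) != slot)
            goto begin;
    tpos = NULL;
found:
    rcu_read_unlock();
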
index 9fbc54a2585d42cb9276adf2c2d168f53e883f63..03cda7bed98587b128c5a9953316644a8debb4d2 100644 (file)
 #include <linux/lockdep.h>
 #include <linux/completion.h>
 #include <linux/debugobjects.h>
+#include <linux/compiler.h>
 
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
+#define ULONG_CMP_GE(a, b)     (ULONG_MAX / 2 >= (a) - (b))
+#define ULONG_CMP_LT(a, b)     (ULONG_MAX / 2 < (a) - (b))
+
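
These helpers compare free-running counters safely across wraparound, as in this minimal sketch:

    /*
     * True iff the grace-period counter has reached our snapshot,
     * even if the unsigned long counter wrapped in the meantime.
     */
    static bool example_gp_done(unsigned long completed, unsigned long snap)
    {
            return ULONG_CMP_GE(completed, snap);
    }
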
 /**
  * struct rcu_head - callback structure for use with RCU
  * @next: next update requests in a list
@@ -57,29 +61,94 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void rcu_barrier(void);
+extern void call_rcu_sched(struct rcu_head *head,
+                          void (*func)(struct rcu_head *rcu));
+extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
 extern void synchronize_sched_expedited(void);
 extern int sched_expedited_torture_stats(char *page);
 
+static inline void __rcu_read_lock_bh(void)
+{
+       local_bh_disable();
+}
+
+static inline void __rcu_read_unlock_bh(void)
+{
+       local_bh_enable();
+}
+
+#ifdef CONFIG_PREEMPT_RCU
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+void synchronize_rcu(void);
+
+/*
+ * Defined as a macro because it is a very low-level header included from
+ * areas that don't even know about current.  This gives the rcu_read_lock()
+ * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+       preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+       preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+       synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+       return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern void rcu_check_callbacks(int cpu, int user);
+struct notifier_block;
+
+#ifdef CONFIG_NO_HZ
+
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static inline void rcu_enter_nohz(void)
+{
+}
+
+static inline void rcu_exit_nohz(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
-#elif defined(CONFIG_TINY_RCU)
+#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
 #include <linux/rcutiny.h>
 #else
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
-#define RCU_HEAD_INIT  { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
-#define INIT_RCU_HEAD(ptr) do { \
-       (ptr)->next = NULL; (ptr)->func = NULL; \
-} while (0)
-
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map;
 extern int debug_lockdep_rcu_enabled(void);
 
 /**
- * rcu_read_lock_held - might we be in RCU read-side critical section?
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.
+ * prove otherwise.  This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
  *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
 static inline int rcu_read_lock_held(void)
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void)
 extern int rcu_read_lock_bh_held(void);
 
 /**
- * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
  * RCU-sched read-side critical section.  In absence of
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  * critical section unless it can prove otherwise.  Note that disabling
  * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.
+ * read-side critical section.  This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void)
 
 extern int rcu_my_thread_group_empty(void);
 
-#define __do_rcu_dereference_check(c)                                  \
+/**
+ * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * @c: condition to check
+ */
+#define rcu_lockdep_assert(c)                                          \
        do {                                                            \
                static bool __warned;                                   \
                if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
@@ -220,41 +296,163 @@ extern int rcu_my_thread_group_empty(void);
                }                                                       \
        } while (0)
 
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_lockdep_assert(c) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer().  Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+
+#ifdef __CHECKER__
+#define rcu_dereference_sparse(p, space) \
+       ((void)(((typeof(*p) space *)p) == p))
+#else /* #ifdef __CHECKER__ */
+#define rcu_dereference_sparse(p, space)
+#endif /* #else #ifdef __CHECKER__ */
+
+#define __rcu_access_pointer(p, space) \
+       ({ \
+               typeof(*p) *_________p1 = (typeof(*p) __force *)ACCESS_ONCE(p); \
+               rcu_dereference_sparse(p, space); \
+               ((typeof(*p) __force __kernel *)(_________p1)); \
+       })
+#define __rcu_dereference_check(p, c, space) \
+       ({ \
+               typeof(*p) *_________p1 = (typeof(*p) __force *)ACCESS_ONCE(p); \
+               rcu_lockdep_assert(c); \
+               rcu_dereference_sparse(p, space); \
+               smp_read_barrier_depends(); \
+               ((typeof(*p) __force __kernel *)(_________p1)); \
+       })
+#define __rcu_dereference_protected(p, c, space) \
+       ({ \
+               rcu_lockdep_assert(c); \
+               rcu_dereference_sparse(p, space); \
+               ((typeof(*p) __force __kernel *)(p)); \
+       })
+
+#define __rcu_dereference_index_check(p, c) \
+       ({ \
+               typeof(p) _________p1 = ACCESS_ONCE(p); \
+               rcu_lockdep_assert(c); \
+               smp_read_barrier_depends(); \
+               (_________p1); \
+       })
+#define __rcu_assign_pointer(p, v, space) \
+       ({ \
+               if (!__builtin_constant_p(v) || \
+                   ((v) != NULL)) \
+                       smp_wmb(); \
+               (p) = (typeof(*v) __force space *)(v); \
+       })
+
+
+/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+
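
A minimal sketch of the NULL-test use case (dev and its __rcu-annotated config pointer are hypothetical):

    if (!rcu_access_pointer(dev->config))
            return -ENOENT;     /* value only compared, never dereferenced */
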
 /**
- * rcu_dereference_check - rcu_dereference with debug checking
+ * rcu_dereference_check() - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
  * Do an rcu_dereference(), but check that the conditions under which the
- * dereference will take place are correct.  Typically the conditions indicate
- * the various locking conditions that should be held at that point.  The check
- * should return true if the conditions are satisfied.
+ * dereference will take place are correct.  Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point.  The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
  *
  * For example:
  *
- *     bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *                                           lockdep_is_held(&foo->lock));
+ *     bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
  *
  * could be used to indicate to lockdep that foo->bar may only be dereferenced
- * if either the RCU read lock is held, or that the lock required to replace
+ * if either rcu_read_lock() is held, or that the lock required to replace
  * the bar struct at foo->bar is held.
  *
  * Note that the list of conditions may also include indications of when a lock
  * need not be held, for example during initialisation or destruction of the
  * target struct:
  *
- *     bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *                                           lockdep_is_held(&foo->lock) ||
+ *     bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
  *                                           atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
  */
 #define rcu_dereference_check(p, c) \
-       ({ \
-               __do_rcu_dereference_check(c); \
-               rcu_dereference_raw(p); \
-       })
+       __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+       __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
 
 /**
- * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+       __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+                               __rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices.  Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer.  Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections.  If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+       __rcu_dereference_index_check((p), (c))
+
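
A hedged sketch of the array-index use case (the device structure and its fields are hypothetical):

    rcu_read_lock();
    idx = rcu_dereference_index_check(dev->cur_idx,
                                      rcu_read_lock_held());
    val = dev->table[idx];      /* an integer index, not a pointer */
    rcu_read_unlock();
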
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
  *
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
@@ -263,35 +461,61 @@ extern int rcu_my_thread_group_empty(void);
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
+ *
+ * This function is only for update-side use.  Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
  */
 #define rcu_dereference_protected(p, c) \
-       ({ \
-               __do_rcu_dereference_check(c); \
-               (p); \
-       })
+       __rcu_dereference_protected((p), (c), __rcu)
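
Typical update-side usage, sketched with a hypothetical foo structure whose updates are serialized by foo->lock:

    spin_lock(&foo->lock);
    bar = rcu_dereference_protected(foo->bar,
                                    lockdep_is_held(&foo->lock));
    bar->count++;               /* safe: the lock excludes all updaters */
    spin_unlock(&foo->lock);
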
 
-#else /* #ifdef CONFIG_PROVE_RCU */
+/**
+ * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_bh_protected(p, c) \
+       __rcu_dereference_protected((p), (c), __rcu)
 
-#define rcu_dereference_check(p, c)    rcu_dereference_raw(p)
-#define rcu_dereference_protected(p, c) (p)
+/**
+ * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_sched_protected(p, c) \
+       __rcu_dereference_protected((p), (c), __rcu)
 
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
- * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
  *
- * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL.  This may also be used in cases where update-side locks prevent
- * the value of the pointer from changing, but rcu_dereference_protected()
- * is a lighter-weight primitive for this use case.
+ * This is a simple wrapper around rcu_dereference_check().
+ */
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
+
+/**
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_bh_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_sched_check() do the dirty work.
  */
-#define rcu_access_pointer(p)  ACCESS_ONCE(p)
+#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
 
 /**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ * rcu_read_lock() - mark the beginning of an RCU read-side critical section
  *
  * When synchronize_rcu() is invoked on one CPU while other CPUs
  * are within RCU read-side critical sections, then the
@@ -302,7 +526,7 @@ extern int rcu_my_thread_group_empty(void);
  * until after the all the other CPUs exit their critical sections.
  *
  * Note, however, that RCU callbacks are permitted to run concurrently
- * with RCU read-side critical sections.  One way that this can happen
+ * with new RCU read-side critical sections.  One way that this can happen
  * is via the following sequence of events: (1) CPU 0 enters an RCU
  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
@@ -317,7 +541,20 @@ extern int rcu_my_thread_group_empty(void);
  * will be deferred until the outermost RCU read-side critical section
  * completes.
  *
- * It is illegal to block while in an RCU read-side critical section.
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+ * read-side critical section that would block in a !PREEMPT kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
+ * is illegal to block while in an RCU read-side critical section.  In
+ * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
+ * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
+ * be preempted, but explicit blocking is illegal.  Finally, in preemptible
+ * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
+ * RCU read-side critical sections may be preempted and they may also
+ * block, but only when acquiring spinlocks that are subject to priority
+ * inheritance.
  */
 static inline void rcu_read_lock(void)
 {
@@ -337,7 +574,7 @@ static inline void rcu_read_lock(void)
  */
 
 /**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
@@ -349,15 +586,16 @@ static inline void rcu_read_unlock(void)
 }
 
 /**
- * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
+ * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
- * consider completion of a softirq handler to be a quiescent state,
- * a process in RCU read-side critical section must be protected by
- * disabling softirqs. Read-side critical sections in interrupt context
- * can use just rcu_read_lock().
- *
+ * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
+ * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
+ * softirq handler to be a quiescent state, a process in an RCU read-side
+ * critical section must be protected by disabling softirqs. Read-side
+ * critical sections in interrupt context can use just rcu_read_lock(),
+ * though this should at least be commented to avoid confusing people
+ * reading the code.
  */
 static inline void rcu_read_lock_bh(void)
 {
@@ -379,13 +617,12 @@ static inline void rcu_read_unlock_bh(void)
 }
 
 /**
- * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
  *
- * Should be used with either
- * - synchronize_sched()
- * or
- * - call_rcu_sched() and rcu_barrier_sched()
- * on the write-side to insure proper synchronization.
+ * This is the equivalent of rcu_read_lock(), but to be used when updates
+ * are being done using call_rcu_sched() or synchronize_rcu_sched().
+ * Read-side critical sections can also be introduced by anything that
+ * disables preemption, including local_irq_disable() and friends.
  */
 static inline void rcu_read_lock_sched(void)
 {
@@ -420,54 +657,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
        preempt_enable_notrace();
 }
 
-
 /**
- * rcu_dereference_raw - fetch an RCU-protected pointer
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
  *
- * The caller must be within some flavor of RCU read-side critical
- * section, or must be otherwise preventing the pointer from changing,
- * for example, by holding an appropriate lock.  This pointer may later
- * be safely dereferenced.  It is the caller's responsibility to have
- * done the right thing, as this primitive does no checking of any kind.
- *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
- */
-#define rcu_dereference_raw(p) ({ \
-                               typeof(p) _________p1 = ACCESS_ONCE(p); \
-                               smp_read_barrier_depends(); \
-                               (_________p1); \
-                               })
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference(p) \
-       rcu_dereference_check(p, rcu_read_lock_held())
-
-/**
- * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_bh(p) \
-               rcu_dereference_check(p, rcu_read_lock_bh_held())
-
-/**
- * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_sched(p) \
-               rcu_dereference_check(p, rcu_read_lock_sched_held())
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections.  Returns the value assigned.
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.  Returns the value assigned.
  *
  * Inserts memory barriers on architectures that require them
  * (pretty much all of them other than x86), and also prevents
@@ -476,14 +673,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * call documents which pointers will be dereferenced by RCU read-side
  * code.
  */
-
 #define rcu_assign_pointer(p, v) \
-       ({ \
-               if (!__builtin_constant_p(v) || \
-                   ((v) != NULL)) \
-                       smp_wmb(); \
-               (p) = (v); \
-       })
+       __rcu_assign_pointer((p), (v), __rcu)
+
+/**
+ * RCU_INIT_POINTER() - initialize an RCU-protected pointer
+ *
+ * Initialize an RCU-protected pointer in such a way as to avoid RCU-lockdep
+ * splats.
+ */
+#define RCU_INIT_POINTER(p, v) \
+               p = (typeof(*v) __force __rcu *)(v)
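
A sketch of the initialize-then-publish pattern, plus RCU_INIT_POINTER() for a point where no readers can yet run (struct foo and the global __rcu pointers gp and other_gp are hypothetical):

    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

    if (!p)
            return -ENOMEM;
    p->a = 1;
    p->b = 2;
    rcu_assign_pointer(gp, p);  /* readers now see a fully initialized p */

    RCU_INIT_POINTER(other_gp, NULL);   /* no readers possible yet */
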
 
 /* Infrastructure to implement the synchronize_() primitives. */
 
@@ -494,26 +694,37 @@ struct rcu_synchronize {
 
 extern void wakeme_after_rcu(struct rcu_head  *head);
 
+#ifdef CONFIG_PREEMPT_RCU
+
 /**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
  *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed.  RCU read-side critical
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed.  However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
 extern void call_rcu(struct rcu_head *head,
                              void (*func)(struct rcu_head *head));
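
For illustration, a hedged sketch of deferred reclamation with call_rcu() (struct foo, with its rcu and list members, is hypothetical):

    static void foo_reclaim(struct rcu_head *rh)
    {
            kfree(container_of(rh, struct foo, rcu));
    }

    static void foo_remove(struct foo *fp)
    {
            list_del_rcu(&fp->list);         /* unpublish */
            call_rcu(&fp->rcu, foo_reclaim); /* free after a grace period */
    }
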
 
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define        call_rcu        call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
 /**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
  *
- * The update function will be invoked some time after a full grace
+ * The callback function will be invoked some time after a full grace
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_bh() assumes
  * that the read-side critical sections end on completion of a softirq
@@ -566,37 +777,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#ifndef CONFIG_PROVE_RCU
-#define __do_rcu_dereference_check(c) do { } while (0)
-#endif /* #ifdef CONFIG_PROVE_RCU */
-
-#define __rcu_dereference_index_check(p, c) \
-       ({ \
-               typeof(p) _________p1 = ACCESS_ONCE(p); \
-               __do_rcu_dereference_check(c); \
-               smp_read_barrier_depends(); \
-               (_________p1); \
-       })
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices.  Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer.  Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections.  If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
-       __rcu_dereference_index_check((p), (c))
-
 #endif /* __LINUX_RCUPDATE_H */
index e2e893144a8450848cf50f8672368aa79c0a0001..13877cb93a6000043f11a6704f2d90b0cc04552d 100644 (file)
 
 #include <linux/cache.h>
 
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-static inline void rcu_note_context_switch(int cpu)
-{
-       rcu_sched_qs(cpu);
-}
+#define rcu_init_sched()       do { } while (0)
 
-#define __rcu_read_lock()      preempt_disable()
-#define __rcu_read_unlock()    preempt_enable()
-#define __rcu_read_lock_bh()   local_bh_disable()
-#define __rcu_read_unlock_bh() local_bh_enable()
-#define call_rcu_sched         call_rcu
+#ifdef CONFIG_TINY_RCU
 
-#define rcu_init_sched()       do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
+static inline void synchronize_rcu_expedited(void)
+{
+       synchronize_sched();    /* Only one CPU, so pretty fast anyway!!! */
+}
 
-static inline int rcu_needs_cpu(int cpu)
+static inline void rcu_barrier(void)
 {
-       return 0;
+       rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
 }
 
-/*
- * Return the number of grace periods.
- */
-static inline long rcu_batches_completed(void)
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_barrier(void);
+void synchronize_rcu_expedited(void);
+
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void synchronize_rcu_bh(void)
 {
-       return 0;
+       synchronize_sched();
 }
 
-/*
- * Return the number of bottom-half grace periods.
- */
-static inline long rcu_batches_completed_bh(void)
+static inline void synchronize_rcu_bh_expedited(void)
 {
-       return 0;
+       synchronize_sched();
 }
 
-static inline void rcu_force_quiescent_state(void)
+#ifdef CONFIG_TINY_RCU
+
+static inline void rcu_preempt_note_context_switch(void)
 {
 }
 
-static inline void rcu_bh_force_quiescent_state(void)
+static inline void exit_rcu(void)
 {
 }
 
-static inline void rcu_sched_force_quiescent_state(void)
+static inline int rcu_needs_cpu(int cpu)
 {
+       return 0;
 }
 
-extern void synchronize_sched(void);
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_preempt_note_context_switch(void);
+extern void exit_rcu(void);
+int rcu_preempt_needs_cpu(void);
 
-static inline void synchronize_rcu(void)
+static inline int rcu_needs_cpu(int cpu)
 {
-       synchronize_sched();
+       return rcu_preempt_needs_cpu();
 }
 
-static inline void synchronize_rcu_bh(void)
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void rcu_note_context_switch(int cpu)
 {
-       synchronize_sched();
+       rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch();
 }
 
-static inline void synchronize_rcu_expedited(void)
+/*
+ * Return the number of grace periods.
+ */
+static inline long rcu_batches_completed(void)
 {
-       synchronize_sched();
+       return 0;
 }
 
-static inline void synchronize_rcu_bh_expedited(void)
+/*
+ * Return the number of bottom-half grace periods.
+ */
+static inline long rcu_batches_completed_bh(void)
 {
-       synchronize_sched();
+       return 0;
 }
 
-struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
+static inline void rcu_force_quiescent_state(void)
 {
 }
 
-static inline void rcu_exit_nohz(void)
+static inline void rcu_bh_force_quiescent_state(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-static inline void exit_rcu(void)
+static inline void rcu_sched_force_quiescent_state(void)
 {
 }
 
-static inline int rcu_preempt_depth(void)
+static inline void rcu_cpu_stall_reset(void)
 {
-       return 0;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index c0ed1c056f290701def0e0116b6aad72ada30c91..95518e6287946177e0eceb5cbf201ebfcaf0e072 100644 (file)
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-struct notifier_block;
-
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
 extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
+extern void rcu_cpu_stall_reset(void);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void synchronize_rcu(void);
 extern void exit_rcu(void);
 
-/*
- * Defined as macro as it is a very low level header
- * included from areas that don't even know about current
- */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static inline void __rcu_read_lock(void)
-{
-       preempt_disable();
-}
-
-static inline void __rcu_read_unlock(void)
-{
-       preempt_enable();
-}
-
-#define synchronize_rcu synchronize_sched
-
 static inline void exit_rcu(void)
 {
 }
 
-static inline int rcu_preempt_depth(void)
-{
-       return 0;
-}
-
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static inline void __rcu_read_lock_bh(void)
-{
-       local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
-       local_bh_enable();
-}
-
-extern void call_rcu_sched(struct rcu_head *head,
-                          void (*func)(struct rcu_head *rcu));
 extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void)
        synchronize_sched_expedited();
 }
 
-extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_barrier(void);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void);
 extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
-#ifdef CONFIG_NO_HZ
-void rcu_enter_nohz(void);
-void rcu_exit_nohz(void);
-#else /* CONFIG_NO_HZ */
-static inline void rcu_enter_nohz(void)
-{
-}
-static inline void rcu_exit_nohz(void)
-{
-}
-#endif /* CONFIG_NO_HZ */
-
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
index 1e2a6db2d7dd03466bf850dc5011860c23e8f9c9..0383601a927c48fa6ccba55de8ebf347573ef8ea 100644 (file)
@@ -875,6 +875,7 @@ enum sched_domain_level {
        SD_LV_NONE = 0,
        SD_LV_SIBLING,
        SD_LV_MC,
+       SD_LV_BOOK,
        SD_LV_CPU,
        SD_LV_NODE,
        SD_LV_ALLNODES,
@@ -1160,6 +1161,13 @@ struct sched_rt_entity {
 
 struct rcu_node;
 
+enum perf_event_task_context {
+       perf_invalid_context = -1,
+       perf_hw_context = 0,
+       perf_sw_context,
+       perf_nr_task_contexts,
+};
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
@@ -1202,11 +1210,13 @@ struct task_struct {
        unsigned int policy;
        cpumask_t cpus_allowed;
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
-       struct rcu_node *rcu_blocked_node;
        struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+       struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,9 +1298,9 @@ struct task_struct {
        struct list_head cpu_timers[3];
 
 /* process credentials */
-       const struct cred *real_cred;   /* objective and real subjective task
+       const struct cred __rcu *real_cred; /* objective and real subjective task
                                         * credentials (COW) */
-       const struct cred *cred;        /* effective (overridable) subjective task
+       const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
@@ -1418,7 +1428,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock */
-       struct css_set *cgroups;
+       struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
 #endif
@@ -1431,7 +1441,7 @@ struct task_struct {
        struct futex_pi_state *pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-       struct perf_event_context *perf_event_ctxp;
+       struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
 #endif
@@ -1681,8 +1691,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN   0x00000001      /* Print alignment warning msgs */
-                                       /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD   0x00000001      /* I am ksoftirqd */
 #define PF_STARTING    0x00000002      /* being created */
 #define PF_EXITING     0x00000004      /* getting shut down */
 #define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
@@ -1740,7 +1749,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1758,9 @@ static inline void rcu_copy_process(struct task_struct *p)
 {
        p->rcu_read_lock_nesting = 0;
        p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
        p->rcu_blocked_node = NULL;
+#endif
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -1826,6 +1837,19 @@ extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An interface for runtime opt-in to irq time accounting based on sched_clock.
+ * The reason for this explicit opt-in is to avoid a performance penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
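
A hedged sketch of the opt-in, as a platform with a cheap sched_clock might do it during setup (the predicate is hypothetical):

    if (sched_clock_is_fast)            /* hypothetical: cheap sched_clock */
            enable_sched_clock_irqtime();
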
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
@@ -2367,9 +2391,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
-#define cond_resched_softirq() ({                              \
-       __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);      \
-       __cond_resched_softirq();                               \
+#define cond_resched_softirq() ({                                      \
+       __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);      \
+       __cond_resched_softirq();                                       \
 })
 
 /*
index a22219afff092952bbe276cb9da1d0509ddf196c..b8246a8df7d2dc2ecc864ac192d694b369dd12d6 100644 (file)
@@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot,
 extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
 extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5);
-extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
+extern int cap_task_setscheduler(struct task_struct *p);
 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
 extern int cap_task_setnice(struct task_struct *p, int nice);
 extern int cap_syslog(int type, bool from_file);
@@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Sets the new child socket's sid to the openreq sid.
  * @inet_conn_established:
  *     Sets the connection's peersid to the secmark on skb.
+ * @secmark_relabel_packet:
+ *     Check if the process should be allowed to relabel packets to
+ *     the given secid.
+ * @secmark_refcount_inc:
+ *     Tells the LSM to increment the number of secmark labeling rules loaded.
+ * @secmark_refcount_dec:
+ *     Tells the LSM to decrement the number of secmark labeling rules loaded.
  * @req_classify_flow:
  *     Sets the flow's sid to the openreq sid.
  * @tun_dev_create:
@@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Return 0 if permission is granted.
  *
  * @secid_to_secctx:
- *     Convert secid to security context.
+ *     Convert secid to security context.  If secdata is NULL the length of
+ *     the result will be returned in seclen, but no secdata will be returned.
+ *     This does mean that the length could change between the call that
+ *     checks the length and the subsequent call that actually allocates
+ *     and returns the secdata.
  *     @secid contains the security ID.
  *     @secdata contains the pointer that stores the converted security context.
+ *     @seclen points to the length of the data.
  * @secctx_to_secid:
  *     Convert security context to secid.
  *     @secid contains the pointer to the generated security ID.
@@ -1501,8 +1511,7 @@ struct security_operations {
        int (*task_getioprio) (struct task_struct *p);
        int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
                        struct rlimit *new_rlim);
-       int (*task_setscheduler) (struct task_struct *p, int policy,
-                                 struct sched_param *lp);
+       int (*task_setscheduler) (struct task_struct *p);
        int (*task_getscheduler) (struct task_struct *p);
        int (*task_movememory) (struct task_struct *p);
        int (*task_kill) (struct task_struct *p,
@@ -1594,6 +1603,9 @@ struct security_operations {
                                  struct request_sock *req);
        void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
        void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
+       int (*secmark_relabel_packet) (u32 secid);
+       void (*secmark_refcount_inc) (void);
+       void (*secmark_refcount_dec) (void);
        void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
        int (*tun_dev_create)(void);
        void (*tun_dev_post_create)(struct sock *sk);
@@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio);
 int security_task_getioprio(struct task_struct *p);
 int security_task_setrlimit(struct task_struct *p, unsigned int resource,
                struct rlimit *new_rlim);
-int security_task_setscheduler(struct task_struct *p,
-                               int policy, struct sched_param *lp);
+int security_task_setscheduler(struct task_struct *p);
 int security_task_getscheduler(struct task_struct *p);
 int security_task_movememory(struct task_struct *p);
 int security_task_kill(struct task_struct *p, struct siginfo *info,
@@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p,
        return 0;
 }
 
-static inline int security_task_setscheduler(struct task_struct *p,
-                                            int policy,
-                                            struct sched_param *lp)
+static inline int security_task_setscheduler(struct task_struct *p)
 {
-       return cap_task_setscheduler(p, policy, lp);
+       return cap_task_setscheduler(p);
 }
 
 static inline int security_task_getscheduler(struct task_struct *p)
@@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk,
                        const struct request_sock *req);
 void security_inet_conn_established(struct sock *sk,
                        struct sk_buff *skb);
+int security_secmark_relabel_packet(u32 secid);
+void security_secmark_refcount_inc(void);
+void security_secmark_refcount_dec(void);
 int security_tun_dev_create(void);
 void security_tun_dev_post_create(struct sock *sk);
 int security_tun_dev_attach(struct sock *sk);
@@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk,
 {
 }
 
+static inline int security_secmark_relabel_packet(u32 secid)
+{
+       return 0;
+}
+
+static inline void security_secmark_refcount_inc(void)
+{
+}
+
+static inline void security_secmark_refcount_dec(void)
+{
+}
+
 static inline int security_tun_dev_create(void)
 {
        return 0;
index 82e0f26a12996a1bcce6efddd07e734313162c19..44f4596126904f7d0357654c999ed2ae13258c82 100644 (file)
@@ -20,75 +20,12 @@ struct kern_ipc_perm;
 
 #ifdef CONFIG_SECURITY_SELINUX
 
-/**
- *     selinux_string_to_sid - map a security context string to a security ID
- *     @str: the security context string to be mapped
- *     @sid: ID value returned via this.
- *
- *     Returns 0 if successful, with the SID stored in sid.  A value
- *     of zero for sid indicates no SID could be determined (but no error
- *     occurred).
- */
-int selinux_string_to_sid(char *str, u32 *sid);
-
-/**
- *     selinux_secmark_relabel_packet_permission - secmark permission check
- *     @sid: SECMARK ID value to be applied to network packet
- *
- *     Returns 0 if the current task is allowed to set the SECMARK label of
- *     packets with the supplied security ID.  Note that it is implicit that
- *     the packet is always being relabeled from the default unlabeled value,
- *     and that the access control decision is made in the AVC.
- */
-int selinux_secmark_relabel_packet_permission(u32 sid);
-
-/**
- *     selinux_secmark_refcount_inc - increments the secmark use counter
- *
- *     SELinux keeps track of the current SECMARK targets in use so it knows
- *     when to apply SECMARK label access checks to network packets.  This
- *     function incements this reference count to indicate that a new SECMARK
- *     target has been configured.
- */
-void selinux_secmark_refcount_inc(void);
-
-/**
- *     selinux_secmark_refcount_dec - decrements the secmark use counter
- *
- *     SELinux keeps track of the current SECMARK targets in use so it knows
- *     when to apply SECMARK label access checks to network packets.  This
- *     function decements this reference count to indicate that one of the
- *     existing SECMARK targets has been removed/flushed.
- */
-void selinux_secmark_refcount_dec(void);
-
 /**
  * selinux_is_enabled - is SELinux enabled?
  */
 bool selinux_is_enabled(void);
 #else
 
-static inline int selinux_string_to_sid(const char *str, u32 *sid)
-{
-       *sid = 0;
-       return 0;
-}
-
-static inline int selinux_secmark_relabel_packet_permission(u32 sid)
-{
-       return 0;
-}
-
-static inline void selinux_secmark_refcount_inc(void)
-{
-       return;
-}
-
-static inline void selinux_secmark_refcount_dec(void)
-{
-       return;
-}
-
 static inline bool selinux_is_enabled(void)
 {
        return false;
index 7415839ac890f538b88611f7843b5b770e73292f..5310d27abd2a503ad523059ea4832f34ecfbb194 100644 (file)
@@ -26,6 +26,9 @@ struct semaphore {
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
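
The new macro is used exactly like DECLARE_MUTEX; a minimal sketch (the semaphore name and function are hypothetical):

    static DEFINE_SEMAPHORE(example_sem);  /* binary semaphore, count = 1 */

    static void example_critical(void)
    {
            down(&example_sem);
            /* ... serialized work ... */
            up(&example_sem);
    }
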
 
index a2fada9becb60c47fcbf8143b3b3c67d291731b8..a8f56e1ec7602705d70ca326e620f1a8927e3925 100644 (file)
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
                                          int offset, 
                                          unsigned int len, __wsum *csump);
 
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
                             int offset, int len);
index cc813f95a2f2b6a4ddf19d04ed1b60fc321c9712..c91302f3a25789b8045b34d9c0a9fa814c29b0ae 100644 (file)
@@ -14,7 +14,9 @@
 #define SPI_MODE_OFFSET                        6
 #define SPI_SCPH_OFFSET                        6
 #define SPI_SCOL_OFFSET                        7
+
 #define SPI_TMOD_OFFSET                        8
+#define SPI_TMOD_MASK                  (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR                    0x0             /* xmit & recv */
 #define SPI_TMOD_TO                    0x1             /* xmit only */
 #define SPI_TMOD_RO                    0x2             /* recv only */
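
With the new mask, a driver can update the transfer-mode field without disturbing the rest of the control word, e.g. (cr0 here stands for a hypothetical control-register value):

    cr0 &= ~SPI_TMOD_MASK;                  /* clear the old mode bits */
    cr0 |= SPI_TMOD_RO << SPI_TMOD_OFFSET;  /* select receive-only */
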
index 4d5d2f546dbff11ee6a4abb6ad2cc53770d45aeb..58971e891f489950102d41f75bea118a22604f23 100644 (file)
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /**
- * srcu_dereference - fetch SRCU-protected pointer with checking
+ * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ *     really are in an SRCU read-side critical section.
+ * @c: condition to check for update-side use
  *
- * Makes rcu_dereference_check() do the dirty work.
+ * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
+ * critical section will result in an RCU-lockdep splat, unless @c evaluates
+ * to 1.  The @c argument will normally be a logical expression containing
+ * lockdep_is_held() calls.
  */
-#define srcu_dereference(p, sp) \
-               rcu_dereference_check(p, srcu_read_lock_held(sp))
+#define srcu_dereference_check(p, sp, c) \
+       __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+
+/**
+ * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ *     really are in an SRCU read-side critical section.
+ *
+ * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
+ * is enabled, invoking this outside of an RCU read-side critical
+ * section will result in an RCU-lockdep splat.
+ */
+#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
 
 /**
  * srcu_read_lock - register a new reader for an SRCU-protected structure.
  * @sp: srcu_struct in which to register the new reader.
  *
  * Enter an SRCU read-side critical section.  Note that SRCU read-side
- * critical sections may be nested.
+ * critical sections may be nested.  However, it is illegal to
+ * call anything that waits on an SRCU grace period for the same
+ * srcu_struct, whether directly or indirectly.  Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited().
  */
 static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 {
index 6b524a0d02e42b14419c5ae75133360a1f4874a5..1808960c50595908935455ca90a80fc02e421ef0 100644 (file)
@@ -126,8 +126,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
 
 #else   /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
-static inline int stop_machine(int (*fn)(void *), void *data,
-                              const struct cpumask *cpus)
+static inline int __stop_machine(int (*fn)(void *), void *data,
+                                const struct cpumask *cpus)
 {
        int ret;
        local_irq_disable();
@@ -136,5 +136,11 @@ static inline int stop_machine(int (*fn)(void *), void *data,
        return ret;
 }
 
+static inline int stop_machine(int (*fn)(void *), void *data,
+                              const struct cpumask *cpus)
+{
+       return __stop_machine(fn, data, cpus);
+}
+
 #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
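A brief usage sketch (set_flag and example are hypothetical): on UP builds the call now resolves through the new __stop_machine() wrapper, which simply runs the callback with interrupts disabled:

static int set_flag(void *data)
{
        *(int *)data = 1;       /* runs with IRQs off on this CPU */
        return 0;
}

static int example(void)
{
        int flag = 0;

        /* NULL cpumask: run on any one CPU */
        return stop_machine(set_flag, &flag, NULL);
}
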
index 671538d25bc15b623155f2b7b7fd269ce394b5d6..8eee9dbbfe7aaddbdb5aaebfbaf5ad82669f846d 100644 (file)
@@ -69,7 +69,7 @@ struct gss_cl_ctx {
        enum rpc_gss_proc       gc_proc;
        u32                     gc_seq;
        spinlock_t              gc_seq_lock;
-       struct gss_ctx          *gc_gss_ctx;
+       struct gss_ctx __rcu    *gc_gss_ctx;
        struct xdr_netobj       gc_wire_ctx;
        u32                     gc_win;
        unsigned long           gc_expiry;
@@ -80,7 +80,7 @@ struct gss_upcall_msg;
 struct gss_cred {
        struct rpc_cred         gc_base;
        enum rpc_gss_svc        gc_service;
-       struct gss_cl_ctx       *gc_ctx;
+       struct gss_cl_ctx __rcu *gc_ctx;
        struct gss_upcall_msg   *gc_upcall;
        unsigned long           gc_upcall_timestamp;
        unsigned char           gc_machine_cred : 1;
index 569dc722a600d55834055cf22474858ef69f5bae..85f38a63f098a2c55989f1e5d4e070595e525a99 100644 (file)
@@ -30,7 +30,7 @@ struct rpc_inode;
  * The high-level client handle
  */
 struct rpc_clnt {
-       struct kref             cl_kref;        /* Number of references */
+       atomic_t                cl_count;       /* Number of references */
        struct list_head        cl_clients;     /* Global list of clients */
        struct list_head        cl_tasks;       /* List of tasks */
        spinlock_t              cl_lock;        /* spinlock */
index 2fee51a11b7399aea7ea7427ae32777b45cf8903..7cdd63366f883a164a7f5d5b74ff8882c62b4f8c 100644 (file)
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
+#define SWAP_FLAG_DISCARD      0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-       SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+       SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
index a8cc4e13434c4261ae8ab536566b23fcd777286a..c90696544176902262699186b32dc1c515311a15 100644 (file)
@@ -23,12 +23,12 @@ struct restart_block {
                };
                /* For futex_wait and futex_wait_requeue_pi */
                struct {
-                       u32 *uaddr;
+                       u32 __user *uaddr;
                        u32 val;
                        u32 flags;
                        u32 bitset;
                        u64 time;
-                       u32 *uaddr2;
+                       u32 __user *uaddr2;
                } futex;
                /* For nanosleep */
                struct {
index 64e084ff5e5c9a6e68257068821b79110a760f02..b91a40e847d236d9046dc3154a7e7a58ea594776 100644 (file)
@@ -201,6 +201,12 @@ int arch_update_cpu_topology(void);
        .balance_interval       = 64,                                   \
 }
 
+#ifdef CONFIG_SCHED_BOOK
+#ifndef SD_BOOK_INIT
+#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
+#endif
+#endif /* CONFIG_SCHED_BOOK */
+
 #ifdef CONFIG_NUMA
 #ifndef SD_NODE_INIT
 #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
index 103d1b61aacba635bb7c81cef9e32b70fe220bb7..a4a90b6726ce6129b43174609fb3e35a2bd088ae 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 
 struct module;
 struct tracepoint;
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
        extern struct tracepoint __tracepoint_##name;                   \
        static inline void trace_##name(proto)                          \
        {                                                               \
-               if (unlikely(__tracepoint_##name.state))                \
+               JUMP_LABEL(&__tracepoint_##name.state, do_trace);       \
+               return;                                                 \
+do_trace:                                                              \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args));                    \
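Conceptually, on architectures without asm-goto jump-label support, the JUMP_LABEL above degrades to an ordinary conditional branch on the tracepoint state. A sketch of that fallback, under the assumption that the generic definition is a plain unlikely test (JUMP_LABEL_SKETCH is a hypothetical name for illustration):

#define JUMP_LABEL_SKETCH(key, label)                   \
        do {                                            \
                if (unlikely(*(key)))                   \
                        goto label;                     \
        } while (0)
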
index 01a082f56ef423065adb66f621b11db2bd1d88f1..357dbc19606f8920f0b060ad04fb1d4fb2dfca00 100644 (file)
@@ -121,7 +121,15 @@ typedef            __u64           u_int64_t;
 typedef                __s64           int64_t;
 #endif
 
-/* this is a special 64bit data type that is 8-byte aligned */
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures.  The new
+ * aligned_u64 type enforces 8-byte alignment so that structs containing
+ * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
 #define aligned_u64 __u64 __attribute__((aligned(8)))
 #define aligned_be64 __be64 __attribute__((aligned(8)))
 #define aligned_le64 __le64 __attribute__((aligned(8)))
@@ -178,6 +186,11 @@ typedef __u64 __bitwise __be64;
 typedef __u16 __bitwise __sum16;
 typedef __u32 __bitwise __wsum;
 
+/* this is a special 64bit data type that is 8-byte aligned */
+#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_be64 __be64 __attribute__((aligned(8)))
+#define __aligned_le64 __le64 __attribute__((aligned(8)))
+
 #ifdef __KERNEL__
 typedef unsigned __bitwise__ gfp_t;
 typedef unsigned __bitwise__ fmode_t;
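An illustrative sketch of the compat problem being solved (struct foo_arg is hypothetical): with a plain __u64, the 64-bit member would sit at offset 4 on x86_32 but offset 8 on x86_64; aligned_u64 pins it to offset 8 on both:

struct foo_arg {
        __u32           flags;          /* offset 0 */
        aligned_u64     buffer;         /* offset 8 on 32- and 64-bit */
};
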
index 7f43ccdc1d38c0eb919efe4891e1b91ec19c3705..eaaea37b3b75dd64b73a34a0e3beb31417bdd0d6 100644 (file)
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization, so the result is still only
+ * approximate.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+{
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
index 0836ccc5712146f87d13e9e35a9480e980708392..3efc9f3f43a0862cad51aa325e08c9f24d129f8e 100644 (file)
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
+               (wait)->flags = 0;                                      \
        } while (0)
 
 /**
index f11100f964824c250b4eacb21e96e36640527ac6..25e02c941bac34a5322510f0461998b1054259fc 100644 (file)
@@ -235,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
+/*
+ * Workqueue flags and constants.  For details, please refer to
+ * Documentation/workqueue.txt.
+ */
 enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
index 97e07f46a0fae22f1cba1eb3073f5661cc97b95a..aa4ebb42a5652b2261b1a2dbbbe133cbd5fbfe51 100644 (file)
@@ -48,6 +48,7 @@ struct videobuf_dmabuf {
 
        /* for userland buffer */
        int                 offset;
+       size_t              size;
        struct page         **pages;
 
        /* for kernel buffers */
index 45375b41a2a0d44e62a370813ab4fdcbe046e3b3..4d40c4d0230baeffbafcb57cb6fac99a1f0174b3 100644 (file)
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
  *     IPv6 Address Label subsystem (addrlabel.c)
  */
 extern int                     ipv6_addr_label_init(void);
+extern void                    ipv6_addr_label_cleanup(void);
 extern void                    ipv6_addr_label_rtnl_register(void);
 extern u32                     ipv6_addr_label(struct net *net,
                                                const struct in6_addr *addr,
index 27a902d9b3a9a431c6b3162a4c6fe479aa99504c..30fce0128dd72fa0281795bc0ae03809b85bc9d0 100644 (file)
@@ -161,12 +161,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l
 {
        struct sk_buff *skb;
 
+       release_sock(sk);
        if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
                skb_reserve(skb, BT_SKB_RESERVE);
                bt_cb(skb)->incoming  = 0;
        }
+       lock_sock(sk);
+
+       if (!skb && *err)
+               return NULL;
+
+       *err = sock_error(sk);
+       if (*err)
+               goto out;
+
+       if (sk->sk_shutdown) {
+               *err = -ECONNRESET;
+               goto out;
+       }
 
        return skb;
+
+out:
+       kfree_skb(skb);
+       return NULL;
 }
 
 int bt_err(__u16 code);
index 726cc353640988bd6fed9361f74cb8ef6dfbb9ce..a4dc5b027bd9cc7731b7fab1146508896c0b215c 100644 (file)
@@ -27,11 +27,17 @@ struct cgroup_cls_state
 #ifdef CONFIG_NET_CLS_CGROUP
 static inline u32 task_cls_classid(struct task_struct *p)
 {
+       int classid;
+
        if (in_interrupt())
                return 0;
 
-       return container_of(task_subsys_state(p, net_cls_subsys_id),
-                           struct cgroup_cls_state, css)->classid;
+       rcu_read_lock();
+       classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+                              struct cgroup_cls_state, css)->classid;
+       rcu_read_unlock();
+
+       return classid;
 }
 #else
 extern int net_cls_subsys_id;
@@ -45,7 +51,8 @@ static inline u32 task_cls_classid(struct task_struct *p)
                return 0;
 
        rcu_read_lock();
-       id = rcu_dereference(net_cls_subsys_id);
+       id = rcu_dereference_index_check(net_cls_subsys_id,
+                                        rcu_read_lock_held());
        if (id >= 0)
                classid = container_of(task_subsys_state(p, id),
                                       struct cgroup_cls_state, css)->classid;
index 81d1413a87010967684febe7344169baa7d828ef..02386505033d975e666cd924a3bafe3ca02bb100 100644 (file)
@@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        skb->rxhash = 0;
+       skb_set_queue_mapping(skb, 0);
        skb_dst_drop(skb);
        nf_reset(skb);
 }
index a4747a0f7303ab73b1dffa7cd9a66740c0ef0ffe..f976885f686f67f593d2928175e9f06de1c94f56 100644 (file)
@@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
        return csum_partial(diff, sizeof(diff), oldsum);
 }
 
+extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+                                  int outin);
+
 #endif /* __KERNEL__ */
 
 #endif /* _NET_IP_VS_H */
index e624dae54fa49b7d713b78641c7f18c2e3cfbae2..caf17db87dbc8983f4aeef52a54c2424a7d21054 100644 (file)
@@ -75,7 +75,7 @@ struct nf_conntrack_helper;
 /* nf_conn feature for connections that have a helper */
 struct nf_conn_help {
        /* Helper. if any */
-       struct nf_conntrack_helper *helper;
+       struct nf_conntrack_helper __rcu *helper;
 
        union nf_conntrack_help help;
 
index bd732d62e1c3a3181c7c324d695b02e1548970e0..7e5e73bfa4dec8e2d45c834507f74d86484b8715 100644 (file)
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
                fl.fl_ip_sport = sport;
                fl.fl_ip_dport = dport;
                fl.proto = protocol;
+               if (inet_sk(sk)->transparent)
+                       fl.flags |= FLOWI_FLAG_ANYSRC;
                ip_rt_put(*rp);
                *rp = NULL;
                security_sk_classify_flow(sk, &fl);
index ac53bfbdfe16b57038cf6c0b7f88cc88f5221594..adab9dc5818355c603a699101c4787c99434eb12 100644 (file)
@@ -752,6 +752,7 @@ struct proto {
        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
        void                    (*unhash)(struct sock *sk);
+       void                    (*rehash)(struct sock *sk);
        int                     (*get_port)(struct sock *sk, unsigned short snum);
 
        /* Keeping track of sockets in use */
index eaa9582779d029a9362c32b3816fed8c157e4d3e..3e4b33e36602caade361654d0561a65b3fba2224 100644 (file)
@@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 {
-       if (tp->max_window && pktsize > (tp->max_window >> 1))
-               return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+       int cutoff;
+
+       /* When peer uses tiny windows, there is no use in packetizing
+        * to sub-MSS pieces for the sake of SWS or making sure there
+        * are enough packets in the pipe for fast recovery.
+        *
+        * On the other hand, for extremely large MSS devices, handling
+        * smaller than MSS windows in this way does make sense.
+        */
+       if (tp->max_window >= 512)
+               cutoff = (tp->max_window >> 1);
+       else
+               cutoff = tp->max_window;
+
+       if (cutoff && pktsize > cutoff)
+               return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
 }
index 7abdf305da50fad63880b12c2c76a69b6b74a5fd..a184d3496b1369deefd62aba376f04320f76a773 100644 (file)
@@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk)
 }
 
 extern void udp_lib_unhash(struct sock *sk);
+extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
 
 static inline void udp_lib_close(struct sock *sk, long timeout)
 {
index fc8f36dd0f5c5145932c45aa8e9f441697dcb0ec..4f53532d4c2f0f6e20d200fabafe1ac51dd1632c 100644 (file)
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo {
        const struct xfrm_type  *type_map[IPPROTO_MAX];
        struct xfrm_mode        *mode_map[XFRM_MODE_MAX];
        int                     (*init_flags)(struct xfrm_state *x);
-       void                    (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
-                                               struct xfrm_tmpl *tmpl,
+       void                    (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
+       void                    (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
                                                xfrm_address_t *daddr, xfrm_address_t *saddr);
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
index 0e4cfb694fe70630457af67e1b1bc568f56c9b09..6fa7cbab7d932c6649e9fbd4221615b6b4d0fd8d 100644 (file)
@@ -5,7 +5,9 @@
 #define _TRACE_IRQ_H
 
 #include <linux/tracepoint.h>
-#include <linux/interrupt.h>
+
+struct irqaction;
+struct softirq_action;
 
 #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
 #define show_softirq_name(val)                         \
@@ -93,7 +95,10 @@ DECLARE_EVENT_CLASS(softirq,
        ),
 
        TP_fast_assign(
-               __entry->vec = (int)(h - vec);
+               if (vec)
+                       __entry->vec = (int)(h - vec);
+               else
+                       __entry->vec = (int)(long)h;
        ),
 
        TP_printk("vec=%d [action=%s]", __entry->vec,
@@ -136,6 +141,23 @@ DEFINE_EVENT(softirq, softirq_exit,
        TP_ARGS(h, vec)
 );
 
+/**
+ * softirq_raise - called immediately when a softirq is raised
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * For this event, @h carries the softirq vector number being raised, cast
+ * to a pointer, rather than a pointer to a real softirq_action; @vec is
+ * NULL to signal that convention. Used in combination with the
+ * softirq_entry tracepoint, this event allows the softirq raise latency
+ * to be determined.
+ */
+DEFINE_EVENT(softirq, softirq_raise,
+
+       TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+
+       TP_ARGS(h, vec)
+);
+
 #endif /*  _TRACE_IRQ_H */
 
 /* This part must be outside protection */
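A sketch of the intended call site, per the convention documented above (example_raise is hypothetical; the cast encodes the vector number in @h while @vec is NULL):

static inline void example_raise(unsigned int nr)
{
        trace_softirq_raise((struct softirq_action *)(unsigned long)nr,
                            NULL);
        or_softirq_pending(1UL << nr);
}
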
index 188deca2f3c7721a1baac60cc07e8d7006442c71..8fe1e93f531dd81a8e549689a1b8e9b551231e9e 100644 (file)
@@ -6,10 +6,31 @@
 
 #include <linux/netdevice.h>
 #include <linux/tracepoint.h>
+#include <linux/ftrace.h>
+
+#define NO_DEV "(no_device)"
+
+TRACE_EVENT(napi_poll,
 
-DECLARE_TRACE(napi_poll,
        TP_PROTO(struct napi_struct *napi),
-       TP_ARGS(napi));
+
+       TP_ARGS(napi),
+
+       TP_STRUCT__entry(
+               __field(        struct napi_struct *,   napi)
+               __string(       dev_name, napi->dev ? napi->dev->name : NO_DEV)
+       ),
+
+       TP_fast_assign(
+               __entry->napi = napi;
+               __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+       ),
+
+       TP_printk("napi poll on napi struct %p for device %s",
+               __entry->napi, __get_str(dev_name))
+);
+
+#undef NO_DEV
 
 #endif /* _TRACE_NAPI_H_ */
 
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
new file mode 100644 (file)
index 0000000..5f247f5
--- /dev/null
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+
+#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(net_dev_xmit,
+
+       TP_PROTO(struct sk_buff *skb,
+                int rc),
+
+       TP_ARGS(skb, rc),
+
+       TP_STRUCT__entry(
+               __field(        void *,         skbaddr         )
+               __field(        unsigned int,   len             )
+               __field(        int,            rc              )
+               __string(       name,           skb->dev->name  )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __entry->rc = rc;
+               __assign_str(name, skb->dev->name);
+       ),
+
+       TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
+               __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+);
+
+DECLARE_EVENT_CLASS(net_dev_template,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb),
+
+       TP_STRUCT__entry(
+               __field(        void *,         skbaddr         )
+               __field(        unsigned int,   len             )
+               __string(       name,           skb->dev->name  )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __assign_str(name, skb->dev->name);
+       ),
+
+       TP_printk("dev=%s skbaddr=%p len=%u",
+               __get_str(name), __entry->skbaddr, __entry->len)
+)
+
+DEFINE_EVENT(net_dev_template, net_dev_queue,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_receive_skb,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_rx,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+#endif /* _TRACE_NET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
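A sketch of where these events would fire (their placement in dev_queue_xmit()/netif_rx() is an assumption here; example_paths is hypothetical):

static void example_paths(struct sk_buff *skb)
{
        int rc = 0;

        trace_net_dev_queue(skb);       /* skb queued for transmission */
        trace_net_dev_xmit(skb, rc);    /* skb handed to driver, result rc */
        trace_netif_rx(skb);            /* skb entering netif_rx() */
        trace_netif_receive_skb(skb);   /* skb entering netif_receive_skb() */
}
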
index 35a2a6e7bf1e74992b8b83b242c4a507fed4246f..286784d69b8f480343244d8046327a5a7d9883d9 100644 (file)
 #ifndef _TRACE_POWER_ENUM_
 #define _TRACE_POWER_ENUM_
 enum {
-       POWER_NONE = 0,
-       POWER_CSTATE = 1,
-       POWER_PSTATE = 2,
+       POWER_NONE      = 0,
+       POWER_CSTATE    = 1,    /* C-State */
+       POWER_PSTATE    = 2,    /* Frequency change or DVFS */
+       POWER_SSTATE    = 3,    /* Suspend */
 };
 #endif
 
+/*
+ * The power events are used for cpuidle & suspend (power_start, power_end)
+ *  and for cpufreq (power_frequency)
+ */
 DECLARE_EVENT_CLASS(power,
 
        TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
@@ -70,6 +75,85 @@ TRACE_EVENT(power_end,
 
 );
 
+/*
+ * The clock events are used for clock enable/disable and for
+ *  clock rate change
+ */
+DECLARE_EVENT_CLASS(clock,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __field(        u64,            state           )
+               __field(        u64,            cpu_id          )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __entry->state = state;
+               __entry->cpu_id = cpu_id;
+       ),
+
+       TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+               (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_enable,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_disable,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_set_rate,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
+/*
+ * The power domain events are used for power domain transitions
+ */
+DECLARE_EVENT_CLASS(power_domain,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __field(        u64,            state           )
+               __field(        u64,            cpu_id          )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __entry->state = state;
+               __entry->cpu_id = cpu_id;
+       ),
+
+       TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+               (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(power_domain, power_domain_target,
+
+       TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+       TP_ARGS(name, state, cpu_id)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
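A sketch of a producer for the new clock events (clk_foo_set_rate is hypothetical; a platform clock implementation would emit the event around reprogramming the hardware):

static int clk_foo_set_rate(unsigned long rate)
{
        trace_clock_set_rate("foo_clk", (unsigned int)rate,
                             raw_smp_processor_id());
        /* ... program the divider/PLL ... */
        return 0;
}
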
index 9208c92aeab5eee575b21f3000ea8034f6c00788..f6334782a593847907b82fcfc5756040b1bd6ff8 100644 (file)
@@ -362,6 +362,35 @@ TRACE_EVENT(sched_stat_runtime,
                        (unsigned long long)__entry->vruntime)
 );
 
+/*
+ * Tracepoint for priority inheritance modifying a task's
+ * priority.
+ */
+TRACE_EVENT(sched_pi_setprio,
+
+       TP_PROTO(struct task_struct *tsk, int newprio),
+
+       TP_ARGS(tsk, newprio),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN   )
+               __field( pid_t, pid                     )
+               __field( int,   oldprio                 )
+               __field( int,   newprio                 )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid            = tsk->pid;
+               __entry->oldprio        = tsk->prio;
+               __entry->newprio        = newprio;
+       ),
+
+       TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
+                       __entry->comm, __entry->pid,
+                       __entry->oldprio, __entry->newprio)
+);
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
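A sketch of the expected emitter (example_setprio is hypothetical): the PI boost/deboost path fires the event before updating the priority, so TP_fast_assign can still read the old value from tsk->prio:

static void example_setprio(struct task_struct *p, int newprio)
{
        trace_sched_pi_setprio(p, newprio);     /* oldprio taken from p->prio */
        p->prio = newprio;
}
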
index 4b2be6dc76f091647eb30f30a40a49218def8682..75ce9d500d8e3c62dbfffbc0acafca13d11d90aa 100644 (file)
@@ -35,6 +35,23 @@ TRACE_EVENT(kfree_skb,
                __entry->skbaddr, __entry->protocol, __entry->location)
 );
 
+TRACE_EVENT(consume_skb,
+
+       TP_PROTO(struct sk_buff *skb),
+
+       TP_ARGS(skb),
+
+       TP_STRUCT__entry(
+               __field(        void *, skbaddr )
+       ),
+
+       TP_fast_assign(
+               __entry->skbaddr = skb;
+       ),
+
+       TP_printk("skbaddr=%p", __entry->skbaddr)
+);
+
 TRACE_EVENT(skb_copy_datagram_iovec,
 
        TP_PROTO(const struct sk_buff *skb, int len),
index 2de5b1cbadd9e47138f879d23cc4d2d5066d32d7..7b920aafa98a3bc5e8873bb192e8c0db59535556 100644 (file)
@@ -21,6 +21,13 @@ config CONSTRUCTORS
        depends on !UML
        default y
 
+config HAVE_IRQ_WORK
+       bool
+
+config IRQ_WORK
+       bool
+       depends on HAVE_IRQ_WORK
+
 menu "General setup"
 
 config EXPERIMENTAL
@@ -340,6 +347,7 @@ choice
 
 config TREE_RCU
        bool "Tree-based hierarchical RCU"
+       depends on !PREEMPT && SMP
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -347,7 +355,7 @@ config TREE_RCU
          smaller systems.
 
 config TREE_PREEMPT_RCU
-       bool "Preemptable tree-based hierarchical RCU"
+       bool "Preemptible tree-based hierarchical RCU"
        depends on PREEMPT
        help
          This option selects the RCU implementation that is
@@ -365,8 +373,22 @@ config TINY_RCU
          is not required.  This option greatly reduces the
          memory footprint of RCU.
 
+config TINY_PREEMPT_RCU
+       bool "Preemptible UP-only small-memory-footprint RCU"
+       depends on !SMP && PREEMPT
+       help
+         This option selects the RCU implementation that is designed
+         for real-time UP systems.  This option greatly reduces the
+         memory footprint of RCU.
+
 endchoice
 
+config PREEMPT_RCU
+       def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
+       help
+         This option enables preemptible-RCU code that is common between
+         the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+
 config RCU_TRACE
        bool "Enable tracing for RCU"
        depends on TREE_RCU || TREE_PREEMPT_RCU
@@ -387,9 +409,12 @@ config RCU_FANOUT
        help
          This option controls the fanout of hierarchical implementations
          of RCU, allowing RCU to work efficiently on machines with
-         large numbers of CPUs.  This value must be at least the cube
-         root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
-         systems and up to 262,144 for 64-bit systems.
+         large numbers of CPUs.  This value must be at least the fourth
+         root of NR_CPUS, which allows NR_CPUS to be insanely large.
+         The default value of RCU_FANOUT should be used for production
+         systems, but if you are stress-testing the RCU implementation
+         itself, small RCU_FANOUT values allow you to test large-system
+         code paths on small(er) systems.
 
          Select a specific number if testing RCU itself.
          Take the default if unsure.
@@ -987,6 +1012,7 @@ config PERF_EVENTS
        default y if (PROFILING || PERF_COUNTERS)
        depends on HAVE_PERF_EVENTS
        select ANON_INODES
+       select IRQ_WORK
        help
          Enable kernel support for various performance events provided
          by software and hardware.
index 40a8f462a8224b298690cb07892f93afe8c15214..0e0d49bbb867f239be5690968227c53e7c0226c0 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
            {
                struct semid_ds out;
 
+               memset(&out, 0, sizeof(out));
+
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 
                out.sem_otime   = in->sem_otime;
index 0b72d1a74be07c25b99a8da670ec4cfb0963cf77..e2c9d52cfe9e52b31eb6a430195d63da22e8aa30 100644 (file)
@@ -10,7 +10,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
-           async.o range.o
+           async.o range.o jump_label.o
 obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
 obj-y += groups.o
 
@@ -23,6 +23,7 @@ CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -86,6 +87,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
 obj-$(CONFIG_TINY_RCU) += rcutiny.o
+obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -100,6 +102,7 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
index 192f88c5b0f9df29b80d51d4c5ceba5388d099eb..291ba3d04beab08f0edc4a0239bae956941e50d9 100644 (file)
@@ -138,7 +138,7 @@ struct css_id {
         * is called after synchronize_rcu(). But for safe use, css_is_removed()
         * css_tryget() should be used for avoiding race.
         */
-       struct cgroup_subsys_state *css;
+       struct cgroup_subsys_state __rcu *css;
        /*
         * ID of this css.
         */
@@ -1791,19 +1791,20 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
-       struct cgroup *cur_cg;
        int retval = 0;
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
-               retval = cgroup_attach_task(cur_cg, tsk);
+               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+               retval = cgroup_attach_task(from_cg, tsk);
                if (retval)
                        break;
        }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
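A usage sketch (foo_setup_worker is hypothetical): a subsystem that spawns a kernel thread on behalf of a user task can now place the worker in every cgroup of the requesting task, not just the current one:

static int foo_setup_worker(struct task_struct *owner,
                            struct task_struct *worker)
{
        return cgroup_attach_task_all(owner, worker);
}
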
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
index e167efce8423e2cdaf2709412c36cc3ae642c90f..c9e2ec0b34a8cc38cda604273618a22c8d2d928c 100644 (file)
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
        return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshal parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+       void __user *ptr;
+
+       /* If len would occupy more than half of the entire compat space... */
+       if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+               return NULL;
+
+       ptr = arch_compat_alloc_user_space(len);
+
+       if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+               return NULL;
+
+       return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
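A usage sketch (struct foo, struct compat_foo and sys_foo are hypothetical): a compat thunk widens a 32-bit structure into user space obtained from compat_alloc_user_space() before calling the native handler:

struct foo { void __user *buf; };
struct compat_foo { compat_uptr_t buf; };

asmlinkage long compat_sys_foo(struct compat_foo __user *cf)
{
        struct foo __user *f = compat_alloc_user_space(sizeof(*f));
        compat_uptr_t buf;

        if (!f || get_user(buf, &cf->buf) ||
            put_user(compat_ptr(buf), &f->buf))
                return -EFAULT;

        return sys_foo(f);      /* hypothetical native 64-bit handler */
}
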
index b23c0979bbe7212a748a9aac70697e92ee54f448..51b143e2a07a49603d9aa462728f6a45032c01d9 100644 (file)
@@ -1397,7 +1397,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
        if (tsk->flags & PF_THREAD_BOUND)
                return -EINVAL;
 
-       ret = security_task_setscheduler(tsk, 0, NULL);
+       ret = security_task_setscheduler(tsk);
        if (ret)
                return ret;
        if (threadgroup) {
@@ -1405,7 +1405,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 
                rcu_read_lock();
                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-                       ret = security_task_setscheduler(c, 0, NULL);
+                       ret = security_task_setscheduler(c);
                        if (ret) {
                                rcu_read_unlock();
                                return ret;
index 75bd9b3ebbb7cf501800115a531e86e7c30c5a28..20059ef4459a4ff293428337d9936ff438a8eb96 100644 (file)
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
        int i, bpno;
        kdb_bp_t *bp, *bp_check;
        int diag;
-       int free;
        char *symname = NULL;
        long offset = 0ul;
        int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
        /*
         * Find an empty bp structure to allocate
         */
-       free = KDB_MAXBPT;
        for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                if (bp->bp_free)
                        break;
index 03120229db2802929065a210930e41c7fa701ba0..e2bdf37f9fdea71a15acb3523ec20b63a6aa42d3 100644 (file)
@@ -149,9 +149,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_EVENTS
-       WARN_ON_ONCE(tsk->perf_event_ctxp);
-#endif
+       perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
 }
index b7e9d60a675d3a08a1096ce725380229217d9fa3..c445f8cc408d777dd7a94aec3fa78c07e1d98b98 100644 (file)
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
+               tmp->vm_mm = mm;
                if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
-               tmp->vm_mm = mm;
                tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
index 6a3a5fa1526d87d16b362e20d7d62e6809285c18..a118bf160e0b05a4b24404beda19d07adead5161 100644 (file)
@@ -91,6 +91,7 @@ struct futex_pi_state {
 
 /**
  * struct futex_q - The hashed futex queue entry, one per waiting task
+ * @list:              priority-sorted list of tasks waiting on this futex
  * @task:              the task waiting on the futex
  * @lock_ptr:          the hash bucket lock
  * @key:               the key the futex is hashed on
@@ -104,7 +105,7 @@ struct futex_pi_state {
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
- * The order of wakup is always to make the first condition true, then
+ * The order of wakeup is always to make the first condition true, then
  * the second.
  *
  * PI futexes are typically woken before they are removed from the hash list via
@@ -295,7 +296,7 @@ void put_futex_key(int fshared, union futex_key *key)
  * Slow path to fixup the fault we just took in the atomic write
  * access to @uaddr.
  *
- * We have no generic implementation of a non destructive write to the
+ * We have no generic implementation of a non-destructive write to the
  * user address. We know that we faulted in the atomic pagefault
  * disabled section so we can as well avoid the #PF overhead by
  * calling get_user_pages() right away.
@@ -515,7 +516,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                         */
                        pi_state = this->pi_state;
                        /*
-                        * Userspace might have messed up non PI and PI futexes
+                        * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;
@@ -736,8 +737,8 @@ static void wake_futex(struct futex_q *q)
 
        /*
         * We set q->lock_ptr = NULL _before_ we wake up the task. If
-        * a non futex wake up happens on another CPU then the task
-        * might exit and p would dereference a non existing task
+        * a non-futex wake up happens on another CPU then the task
+        * might exit and p would dereference a non-existing task
         * struct. Prevent this by holding a reference on p across the
         * wake up.
         */
@@ -1131,11 +1132,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 
 /**
  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
- * uaddr1:     source futex user address
- * uaddr2:     target futex user address
- * nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
- * nr_requeue: number of waiters to requeue (0-INT_MAX)
- * requeue_pi: if we are attempting to requeue from a non-pi futex to a
+ * @uaddr1:    source futex user address
+ * @fshared:   0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+ * @uaddr2:    target futex user address
+ * @nr_wake:   number of waiters to wake (must be 1 for requeue_pi)
+ * @nr_requeue:        number of waiters to requeue (0-INT_MAX)
+ * @cmpval:    @uaddr1 expected value (or %NULL)
+ * @requeue_pi:        if we are attempting to requeue from a non-pi futex to a
  *             pi futex (pi to pi requeue is not supported)
  *
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
@@ -1360,10 +1363,10 @@ out:
 
 /* The key must be already stored in q->key. */
 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
+       __acquires(&hb->lock)
 {
        struct futex_hash_bucket *hb;
 
-       get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;
 
@@ -1373,9 +1376,9 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 
 static inline void
 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+       __releases(&hb->lock)
 {
        spin_unlock(&hb->lock);
-       drop_futex_key_refs(&q->key);
 }
 
 /**
@@ -1391,6 +1394,7 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
  * an example).
  */
 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+       __releases(&hb->lock)
 {
        int prio;
 
@@ -1471,6 +1475,7 @@ retry:
  * and dropped here.
  */
 static void unqueue_me_pi(struct futex_q *q)
+       __releases(q->lock_ptr)
 {
        WARN_ON(plist_node_empty(&q->list));
        plist_del(&q->list, &q->list.plist);
@@ -1480,8 +1485,6 @@ static void unqueue_me_pi(struct futex_q *q)
        q->pi_state = NULL;
 
        spin_unlock(q->lock_ptr);
-
-       drop_futex_key_refs(&q->key);
 }
 
 /*
@@ -1812,7 +1815,10 @@ static int futex_wait(u32 __user *uaddr, int fshared,
        }
 
 retry:
-       /* Prepare to wait on uaddr. */
+       /*
+        * Prepare to wait on uaddr. On success, holds hb lock and increments
+        * q.key refs.
+        */
        ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
        if (ret)
                goto out;
@@ -1822,28 +1828,27 @@ retry:
 
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
+       /* unqueue_me() drops q.key ref */
        if (!unqueue_me(&q))
-               goto out_put_key;
+               goto out;
        ret = -ETIMEDOUT;
        if (to && !to->task)
-               goto out_put_key;
+               goto out;
 
        /*
         * We expect signal_pending(current), but we might be the
         * victim of a spurious wakeup as well.
         */
-       if (!signal_pending(current)) {
-               put_futex_key(fshared, &q.key);
+       if (!signal_pending(current))
                goto retry;
-       }
 
        ret = -ERESTARTSYS;
        if (!abs_time)
-               goto out_put_key;
+               goto out;
 
        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
-       restart->futex.uaddr = (u32 *)uaddr;
+       restart->futex.uaddr = uaddr;
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
@@ -1856,8 +1861,6 @@ retry:
 
        ret = -ERESTART_RESTARTBLOCK;
 
-out_put_key:
-       put_futex_key(fshared, &q.key);
 out:
        if (to) {
                hrtimer_cancel(&to->timer);
@@ -1869,7 +1872,7 @@ out:
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-       u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
+       u32 __user *uaddr = restart->futex.uaddr;
        int fshared = 0;
        ktime_t t, *tp = NULL;
 
@@ -2236,7 +2239,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
        q.rt_waiter = &rt_waiter;
        q.requeue_pi_key = &key2;
 
-       /* Prepare to wait on uaddr. */
+       /*
+        * Prepare to wait on uaddr. On success, increments q.key (key1) ref
+        * count.
+        */
        ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
        if (ret)
                goto out_key2;
@@ -2254,7 +2260,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         * In order for us to be here, we know our q.key == key2, and since
         * we took the hb->lock above, we also know that futex_requeue() has
         * completed and we no longer have to concern ourselves with a wakeup
-        * race with the atomic proxy lock acquition by the requeue code.
+        * race with the atomic proxy lock acquisition by the requeue code. The
+        * futex_requeue dropped our key1 reference and incremented our key2
+        * reference count.
         */
 
        /* Check if the requeue code acquired the second futex for us. */
@@ -2458,7 +2466,7 @@ retry:
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
                                     struct robust_list __user * __user *head,
-                                    int *pi)
+                                    unsigned int *pi)
 {
        unsigned long uentry;
 
@@ -2647,7 +2655,7 @@ static int __init futex_init(void)
         * of the complex code paths. Also we want to prevent
         * registration of robust lists in that case. NULL is
         * guaranteed to fault and we get -EFAULT on functional
-        * implementation, the non functional ones will return
+        * implementation, the non-functional ones will return
         * -ENOSYS.
         */
        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
index d49afb2395e5cab17dd85417c95741c60c12a4cf..06da4dfc339b7362716858f92bec749962241808 100644 (file)
@@ -19,7 +19,7 @@
  */
 static inline int
 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-                  compat_uptr_t __user *head, int *pi)
+                  compat_uptr_t __user *head, unsigned int *pi)
 {
        if (get_user(*uentry, head))
                return -EFAULT;
index ef3c3f88a7a35e36d8f1fb551c56e534cea52687..f83972b16564d00676154900f09d3c70affd773c 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *   files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *   object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info *info;
-       struct gcov_info *ghost;
+       struct gcov_info **loaded_info;
+       struct gcov_info *unloaded_info;
        struct dentry *dentry;
        struct dentry **links;
+       int num_loaded;
        char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->info)
-               return node->info;
+       if (node->num_loaded > 0)
+               return node->loaded_info[0];
 
-       return node->ghost;
+       return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+       struct gcov_info *info;
+       int i = 0;
+
+       if (node->unloaded_info)
+               info = gcov_info_dup(node->unloaded_info);
+       else
+               info = gcov_info_dup(node->loaded_info[i++]);
+       if (!info)
+               return NULL;
+       for (; i < node->num_loaded; i++)
+               gcov_info_add(info, node->loaded_info[i]);
+
+       return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access.
+        * complexity and concurrent access, and to keep the accumulation of
+        * multiple profiling data sets associated with one node simple.
         */
-       info = gcov_info_dup(get_node_info(node));
+       info = get_accumulated_info(node);
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+       int i;
+
+       if (node->unloaded_info)
+               gcov_info_reset(node->unloaded_info);
+       for (i = 0; i < node->num_loaded; i++)
+               gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->ghost)
+               if (node->num_loaded == 0)
                        remove_node(node);
                else
-                       gcov_info_reset(node->info);
+                       reset_node(node);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       node->info = info;
+       if (node->loaded_info) {
+               node->loaded_info[0] = info;
+               node->num_loaded = 1;
+       }
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node) {
-               pr_warning("out of memory\n");
-               return NULL;
+       if (!node)
+               goto err_nomem;
+       if (info) {
+               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                          GFP_KERNEL);
+               if (!node->loaded_info)
+                       goto err_nomem;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
+
+err_nomem:
+       kfree(node);
+       pr_warning("out of memory\n");
+       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       if (node->ghost)
-               gcov_info_free(node->ghost);
+       kfree(node->loaded_info);
+       if (node->unloaded_info)
+               gcov_info_free(node->unloaded_info);
        kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->info)
-                       gcov_info_reset(node->info);
+               if (node->num_loaded > 0)
+                       reset_node(node);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-       node->ghost = gcov_info_dup(node->info);
-       if (!node->ghost) {
-               pr_warning("could not save data for '%s' (out of memory)\n",
-                          node->info->filename);
-               return -ENOMEM;
+       struct gcov_info **loaded_info;
+       int num = node->num_loaded;
+
+       /*
+        * Prepare new array. This is done first to simplify cleanup in
+        * case the new data set is incompatible, the node only contains
+        * unloaded data sets and there's not enough memory for the array.
+        */
+       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+       if (!loaded_info) {
+               pr_warning("could not add '%s' (out of memory)\n",
+                          info->filename);
+               return;
+       }
+       memcpy(loaded_info, node->loaded_info,
+              num * sizeof(struct gcov_info *));
+       loaded_info[num] = info;
+       /* Check if the new data set is compatible. */
+       if (num == 0) {
+               /*
+                * A module was unloaded, modified and reloaded. The new
+                * data set replaces the copy of the last one.
+                */
+               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                       pr_warning("discarding saved data for %s "
+                                  "(incompatible version)\n", info->filename);
+                       gcov_info_free(node->unloaded_info);
+                       node->unloaded_info = NULL;
+               }
+       } else {
+               /*
+                * Two different versions of the same object file are loaded.
+                * The initial one takes precedence.
+                */
+               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                       pr_warning("could not add '%s' (incompatible "
+                                  "version)\n", info->filename);
+                       kfree(loaded_info);
+                       return;
+               }
        }
-       node->info = NULL;
+       /* Overwrite previous array. */
+       kfree(node->loaded_info);
+       node->loaded_info = loaded_info;
+       node->num_loaded = num + 1;
+}
 
-       return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       for (i = 0; i < node->num_loaded; i++) {
+               if (node->loaded_info[i] == info)
+                       return i;
+       }
+       return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-       if (gcov_info_is_compatible(node->ghost, info))
-               gcov_info_add(info, node->ghost);
+       if (node->unloaded_info)
+               gcov_info_add(node->unloaded_info, info);
        else {
-               pr_warning("discarding saved data for '%s' (version changed)\n",
+               node->unloaded_info = gcov_info_dup(info);
+               if (!node->unloaded_info) {
+                       pr_warning("could not save data for '%s' "
+                                  "(out of memory)\n", info->filename);
+               }
+       }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       i = get_info_index(node, info);
+       if (i < 0) {
+               pr_warning("could not remove '%s' (not found)\n",
                           info->filename);
+               return;
        }
-       gcov_info_free(node->ghost);
-       node->ghost = NULL;
-       node->info = info;
+       if (gcov_persist)
+               save_info(node, info);
+       /* Shrink array. */
+       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+       node->num_loaded--;
+       if (node->num_loaded > 0)
+               return;
+       /* Last loaded data set was removed. */
+       kfree(node->loaded_info);
+       node->loaded_info = NULL;
+       node->num_loaded = 0;
+       if (!node->unloaded_info)
+               remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               /* Add new node or revive ghost. */
-               if (!node) {
+               if (node)
+                       add_info(node, info);
+               else
                        add_node(info);
-                       break;
-               }
-               if (gcov_persist)
-                       revive_node(node, info);
-               else {
-                       pr_warning("could not add '%s' (already exists)\n",
-                                  info->filename);
-               }
                break;
        case GCOV_REMOVE:
-               /* Remove node or turn into ghost. */
-               if (!node) {
+               if (node)
+                       remove_info(node, info);
+               else {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
-                       break;
                }
-               if (gcov_persist) {
-                       if (!ghost_node(node))
-                               break;
-               }
-               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
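
As a rough userspace sketch of the array management the gcov hunks above introduce (grow loaded_info with a fresh allocation so a failed grow leaves the old array intact, shrink by overwriting the victim with the last slot); the struct and function names here are illustrative, not part of the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	void **loaded;		/* like gcov_node->loaded_info */
	int num_loaded;
};

static int node_add(struct node *n, void *item)
{
	void **grown = calloc(n->num_loaded + 1, sizeof(*grown));

	if (!grown)
		return -1;	/* old array is still valid, as in add_info() */
	if (n->num_loaded)
		memcpy(grown, n->loaded, n->num_loaded * sizeof(*grown));
	grown[n->num_loaded] = item;
	free(n->loaded);
	n->loaded = grown;
	n->num_loaded++;
	return 0;
}

static void node_remove(struct node *n, void *item)
{
	int i;

	for (i = 0; i < n->num_loaded; i++)
		if (n->loaded[i] == item)
			break;
	if (i == n->num_loaded)
		return;
	/* Order is not preserved: move the last entry into the hole. */
	n->loaded[i] = n->loaded[--n->num_loaded];
	if (n->num_loaded == 0) {
		free(n->loaded);
		n->loaded = NULL;
	}
}

int main(void)
{
	struct node n = { NULL, 0 };
	int a, b;

	node_add(&n, &a);
	node_add(&n, &b);
	node_remove(&n, &a);
	printf("%d entry left\n", n.num_loaded);
	free(n.loaded);
	return 0;
}
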
index 53b1916c94926c245aacd17ccb420fc00d06a19b..253dc0f35cf4c30786d1ff3565427d1374762a04 100644 (file)
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
+               if (grp > GROUP_AT(group_info, mid))
                        left = mid + 1;
-               else if (cmp < 0)
+               else if (grp < GROUP_AT(group_info, mid))
                        right = mid;
                else
                        return 1;
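
The point of this hunk: gid_t is unsigned, so the old grp - GROUP_AT(...) difference wraps and, once cast to int, can get the sign wrong for gids that differ by more than INT_MAX, sending the binary search down the wrong branch. Comparing directly avoids the overflow. A standalone sketch of the corrected comparison (the array contents are made up):

#include <stdio.h>

static int search(const unsigned int *groups, unsigned int n, unsigned int grp)
{
	unsigned int left = 0, right = n;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (grp > groups[mid])
			left = mid + 1;
		else if (grp < groups[mid])
			right = mid;
		else
			return 1;	/* found */
	}
	return 0;
}

int main(void)
{
	unsigned int g[] = { 4, 20, 24, 27, 1000 };

	printf("24: %d, 5: %d\n", search(g, 5, 24), search(g, 5, 5));
	return 0;
}
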
index ce669174f355c7dd1e1893903bb4d18a25f17c34..72206cf5c6cf854898d889a6a645e44febdd526f 100644 (file)
@@ -931,6 +931,7 @@ static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
        if (hrtimer_is_queued(timer)) {
+               unsigned long state;
                int reprogram;
 
                /*
@@ -944,8 +945,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
-               __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
-                                reprogram);
+               /*
+                * We must preserve the CALLBACK state flag here,
+                * otherwise we could move the timer base in
+                * switch_hrtimer_base.
+                */
+               state = timer->state & HRTIMER_STATE_CALLBACK;
+               __remove_hrtimer(timer, base, state, reprogram);
                return 1;
        }
        return 0;
@@ -1091,11 +1097,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-       struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;
 
-       base = lock_hrtimer_base(timer, &flags);
+       lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
@@ -1232,6 +1237,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
                enqueue_hrtimer(timer, base);
        }
+
+       WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+
        timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
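
The fix above boils down to masking, not zeroing, the state word when a timer is pulled off the queue. A tiny illustration of that flag-preserving pattern (the constants are made up, not the hrtimer values):

#include <stdio.h>

#define STATE_ENQUEUED 0x01
#define STATE_CALLBACK 0x02

int main(void)
{
	unsigned long state = STATE_ENQUEUED | STATE_CALLBACK;

	/* Drop the queue bit but keep the fact that a callback is running. */
	state &= STATE_CALLBACK;
	printf("state after removal: %#lx\n", state);
	return 0;
}
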
 
index 0c642d51aac2d8ab82c782a786eb13b90c0985c1..53ead174da2f0a7c7849df4ef81b60a96fe7c4f6 100644 (file)
@@ -98,7 +98,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
        printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
                        " disables this message.\n");
        sched_show_task(t);
-       __debug_show_held_locks(t);
+       debug_show_held_locks(t);
 
        touch_nmi_watchdog();
 
@@ -111,7 +111,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * periodically exit the critical section and enter a new one.
  *
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
- * exit the grace period. For classic RCU, a reschedule is required.
+ * to exit the grace period. For classic RCU, a reschedule is required.
  */
 static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
index d71a987fd2bf2ba5e698f9b69fccb59db4a1bb30..2c9120f0afca9872cc4edba57d4e5cfe9311788f 100644 (file)
@@ -113,12 +113,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  */
 static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-       struct perf_event_context *ctx = bp->ctx;
+       struct task_struct *tsk = bp->hw.bp_target;
        struct perf_event *iter;
        int count = 0;
 
        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-               if (iter->ctx == ctx && find_slot_idx(iter) == type)
+               if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
                        count += hw_breakpoint_weight(iter);
        }
 
@@ -134,7 +134,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
 {
        int cpu = bp->cpu;
-       struct task_struct *tsk = bp->ctx->task;
+       struct task_struct *tsk = bp->hw.bp_target;
 
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -213,7 +213,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
 {
        int cpu = bp->cpu;
-       struct task_struct *tsk = bp->ctx->task;
+       struct task_struct *tsk = bp->hw.bp_target;
 
        /* Pinned counter cpu profiling */
        if (!tsk) {
@@ -433,7 +433,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
 {
-       return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+       return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
@@ -515,7 +515,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
-               bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
+               bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
 
                *pevent = bp;
 
@@ -565,6 +565,61 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
        .priority = 0x7fffffff
 };
 
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+       release_bp_slot(event);
+}
+
+static int hw_breakpoint_event_init(struct perf_event *bp)
+{
+       int err;
+
+       if (bp->attr.type != PERF_TYPE_BREAKPOINT)
+               return -ENOENT;
+
+       err = register_perf_hw_breakpoint(bp);
+       if (err)
+               return err;
+
+       bp->destroy = bp_perf_event_destroy;
+
+       return 0;
+}
+
+static int hw_breakpoint_add(struct perf_event *bp, int flags)
+{
+       if (!(flags & PERF_EF_START))
+               bp->hw.state = PERF_HES_STOPPED;
+
+       return arch_install_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_del(struct perf_event *bp, int flags)
+{
+       arch_uninstall_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_start(struct perf_event *bp, int flags)
+{
+       bp->hw.state = 0;
+}
+
+static void hw_breakpoint_stop(struct perf_event *bp, int flags)
+{
+       bp->hw.state = PERF_HES_STOPPED;
+}
+
+static struct pmu perf_breakpoint = {
+       .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
+
+       .event_init     = hw_breakpoint_event_init,
+       .add            = hw_breakpoint_add,
+       .del            = hw_breakpoint_del,
+       .start          = hw_breakpoint_start,
+       .stop           = hw_breakpoint_stop,
+       .read           = hw_breakpoint_pmu_read,
+};
+
 static int __init init_hw_breakpoint(void)
 {
        unsigned int **task_bp_pinned;
@@ -586,6 +641,8 @@ static int __init init_hw_breakpoint(void)
 
        constraints_initialized = 1;
 
+       perf_pmu_register(&perf_breakpoint);
+
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
 
  err_alloc:
@@ -601,8 +658,3 @@ static int __init init_hw_breakpoint(void)
 core_initcall(init_hw_breakpoint);
 
 
-struct pmu perf_ops_bp = {
-       .enable         = arch_install_hw_breakpoint,
-       .disable        = arch_uninstall_hw_breakpoint,
-       .read           = hw_breakpoint_pmu_read,
-};
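
A compiled sketch of the shape of the new breakpoint pmu above: an ops table whose add/start/stop callbacks drive a per-event stopped flag, so an event can be installed without being started. Everything here (EF_START, HES_STOPPED, the ops struct) mimics the patch loosely rather than quoting the kernel API:

#include <stdio.h>

#define EF_START    0x01
#define HES_STOPPED 0x01

struct event {
	int state;
};

struct ops {
	int  (*add)(struct event *e, int flags);
	void (*start)(struct event *e, int flags);
	void (*stop)(struct event *e, int flags);
};

static int ev_add(struct event *e, int flags)
{
	if (!(flags & EF_START))
		e->state = HES_STOPPED;	/* installed but not counting */
	else
		e->state = 0;
	return 0;
}

static void ev_start(struct event *e, int flags) { e->state = 0; }
static void ev_stop(struct event *e, int flags)  { e->state = HES_STOPPED; }

static const struct ops breakpoint_ops = {
	.add	= ev_add,
	.start	= ev_start,
	.stop	= ev_stop,
};

int main(void)
{
	struct event e;

	breakpoint_ops.add(&e, 0);
	printf("after add:   %s\n", e.state ? "stopped" : "running");
	breakpoint_ops.start(&e, 0);
	printf("after start: %s\n", e.state ? "stopped" : "running");
	return 0;
}
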
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
new file mode 100644 (file)
index 0000000..f16763f
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Provides a framework for enqueueing and running callbacks from hardirq
+ * context. The enqueueing is NMI-safe.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+
+/*
+ * An entry can be in one of four states:
+ *
+ * free      NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ *
+ * We use the lower two bits of the next pointer to keep PENDING and BUSY
+ * flags.
+ */
+
+#define IRQ_WORK_PENDING       1UL
+#define IRQ_WORK_BUSY          2UL
+#define IRQ_WORK_FLAGS         3UL
+
+static inline bool irq_work_is_set(struct irq_work *entry, int flags)
+{
+       return (unsigned long)entry->next & flags;
+}
+
+static inline struct irq_work *irq_work_next(struct irq_work *entry)
+{
+       unsigned long next = (unsigned long)entry->next;
+       next &= ~IRQ_WORK_FLAGS;
+       return (struct irq_work *)next;
+}
+
+static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
+{
+       unsigned long next = (unsigned long)entry;
+       next |= flags;
+       return (struct irq_work *)next;
+}
+
+static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+
+/*
+ * Claim the entry so that no one else will poke at it.
+ */
+static bool irq_work_claim(struct irq_work *entry)
+{
+       struct irq_work *next, *nflags;
+
+       do {
+               next = entry->next;
+               if ((unsigned long)next & IRQ_WORK_PENDING)
+                       return false;
+               nflags = next_flags(next, IRQ_WORK_FLAGS);
+       } while (cmpxchg(&entry->next, next, nflags) != next);
+
+       return true;
+}
+
+
+void __weak arch_irq_work_raise(void)
+{
+       /*
+        * Lame architectures will get the timer tick callback
+        */
+}
+
+/*
+ * Queue the entry and raise the IPI if needed.
+ */
+static void __irq_work_queue(struct irq_work *entry)
+{
+       struct irq_work **head, *next;
+
+       head = &get_cpu_var(irq_work_list);
+
+       do {
+               next = *head;
+               /* Can assign non-atomic because we keep the flags set. */
+               entry->next = next_flags(next, IRQ_WORK_FLAGS);
+       } while (cmpxchg(head, next, entry) != next);
+
+       /* The list was empty, raise self-interrupt to start processing. */
+       if (!irq_work_next(entry))
+               arch_irq_work_raise();
+
+       put_cpu_var(irq_work_list);
+}
+
+/*
+ * Enqueue the irq_work @entry; returns true on success and false when the
+ * @entry was already enqueued by someone else.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue(struct irq_work *entry)
+{
+       if (!irq_work_claim(entry)) {
+               /*
+                * Already enqueued, can't do!
+                */
+               return false;
+       }
+
+       __irq_work_queue(entry);
+       return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue);
+
+/*
+ * Run the irq_work entries on this cpu. Must be run from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+       struct irq_work *list, **head;
+
+       head = &__get_cpu_var(irq_work_list);
+       if (*head == NULL)
+               return;
+
+       BUG_ON(!in_irq());
+       BUG_ON(!irqs_disabled());
+
+       list = xchg(head, NULL);
+       while (list != NULL) {
+               struct irq_work *entry = list;
+
+               list = irq_work_next(list);
+
+               /*
+                * Clear the PENDING bit, after this point the @entry
+                * can be re-used.
+                */
+               entry->next = next_flags(NULL, IRQ_WORK_BUSY);
+               entry->func(entry);
+               /*
+                * Clear the BUSY bit and return to the free state if
+                * no-one else claimed it meanwhile.
+                */
+               cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+       }
+}
+EXPORT_SYMBOL_GPL(irq_work_run);
+
+/*
+ * Synchronize against the irq_work @entry; ensures the entry is not
+ * currently in use.
+ */
+void irq_work_sync(struct irq_work *entry)
+{
+       WARN_ON_ONCE(irqs_disabled());
+
+       while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+               cpu_relax();
+}
+EXPORT_SYMBOL_GPL(irq_work_sync);
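
The core trick in irq_work.c is storing the PENDING/BUSY flags in the two low bits of the "next" pointer and claiming an entry with a cmpxchg() loop, so enqueueing stays NMI-safe. A userspace sketch of the claim step, with C11 atomics standing in for cmpxchg() (the names are illustrative; build with -std=c11):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WORK_PENDING 1UL
#define WORK_BUSY    2UL
#define WORK_FLAGS   3UL

struct work {
	_Atomic uintptr_t next;	/* pointer value | flag bits */
};

static bool work_claim(struct work *w)
{
	uintptr_t old = atomic_load(&w->next);

	do {
		if (old & WORK_PENDING)
			return false;	/* someone else already queued it */
		/* Set both flag bits while keeping the pointer bits. */
	} while (!atomic_compare_exchange_weak(&w->next, &old,
					       old | WORK_FLAGS));
	return true;
}

int main(void)
{
	struct work w = { 0 };

	printf("first claim:  %d\n", work_claim(&w));	/* 1 */
	printf("second claim: %d\n", work_claim(&w));	/* 0 */
	return 0;
}
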
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..7be868b
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * jump label support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/err.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define JUMP_LABEL_HASH_BITS 6
+#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
+static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
+
+/* mutex to protect coming/going of the jump_label table */
+static DEFINE_MUTEX(jump_label_mutex);
+
+struct jump_label_entry {
+       struct hlist_node hlist;
+       struct jump_entry *table;
+       int nr_entries;
+       /* hang modules off here */
+       struct hlist_head modules;
+       unsigned long key;
+};
+
+struct jump_label_module_entry {
+       struct hlist_node hlist;
+       struct jump_entry *table;
+       int nr_entries;
+       struct module *mod;
+};
+
+static int jump_label_cmp(const void *a, const void *b)
+{
+       const struct jump_entry *jea = a;
+       const struct jump_entry *jeb = b;
+
+       if (jea->key < jeb->key)
+               return -1;
+
+       if (jea->key > jeb->key)
+               return 1;
+
+       return 0;
+}
+
+static void
+sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
+{
+       unsigned long size;
+
+       size = (((unsigned long)stop - (unsigned long)start)
+                                       / sizeof(struct jump_entry));
+       sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+}
+
+static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct jump_label_entry *e;
+       u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+
+       head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+       hlist_for_each_entry(e, node, head, hlist) {
+               if (key == e->key)
+                       return e;
+       }
+       return NULL;
+}
+
+static struct jump_label_entry *
+add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
+{
+       struct hlist_head *head;
+       struct jump_label_entry *e;
+       u32 hash;
+
+       e = get_jump_label_entry(key);
+       if (e)
+               return ERR_PTR(-EEXIST);
+
+       e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+
+       hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+       head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+       e->key = key;
+       e->table = table;
+       e->nr_entries = nr_entries;
+       INIT_HLIST_HEAD(&(e->modules));
+       hlist_add_head(&e->hlist, head);
+       return e;
+}
+
+static int
+build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
+{
+       struct jump_entry *iter, *iter_begin;
+       struct jump_label_entry *entry;
+       int count;
+
+       sort_jump_label_entries(start, stop);
+       iter = start;
+       while (iter < stop) {
+               entry = get_jump_label_entry(iter->key);
+               if (!entry) {
+                       iter_begin = iter;
+                       count = 0;
+                       while ((iter < stop) &&
+                               (iter->key == iter_begin->key)) {
+                               iter++;
+                               count++;
+                       }
+                       entry = add_jump_label_entry(iter_begin->key,
+                                                       count, iter_begin);
+                       if (IS_ERR(entry))
+                               return PTR_ERR(entry);
+                } else {
+                       WARN_ONCE(1, KERN_ERR "build_jump_label_hashtable: unexpected entry!\n");
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/***
+ * jump_label_update - update jump label text
+ * @key -  key value associated with a jump label
+ * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
+ *
+ * Will enable/disable the jump for jump label @key, depending on the
+ * value of @type.
+ *
+ */
+
+void jump_label_update(unsigned long key, enum jump_label_type type)
+{
+       struct jump_entry *iter;
+       struct jump_label_entry *entry;
+       struct hlist_node *module_node;
+       struct jump_label_module_entry *e_module;
+       int count;
+
+       mutex_lock(&jump_label_mutex);
+       entry = get_jump_label_entry((jump_label_t)key);
+       if (entry) {
+               count = entry->nr_entries;
+               iter = entry->table;
+               while (count--) {
+                       if (kernel_text_address(iter->code))
+                               arch_jump_label_transform(iter, type);
+                       iter++;
+               }
+               /* enable/disable jump labels in modules */
+               hlist_for_each_entry(e_module, module_node, &(entry->modules),
+                                                       hlist) {
+                       count = e_module->nr_entries;
+                       iter = e_module->table;
+                       while (count--) {
+                               if (kernel_text_address(iter->code))
+                                       arch_jump_label_transform(iter, type);
+                               iter++;
+                       }
+               }
+       }
+       mutex_unlock(&jump_label_mutex);
+}
+
+static int addr_conflict(struct jump_entry *entry, void *start, void *end)
+{
+       if (entry->code <= (unsigned long)end &&
+               entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+               return 1;
+
+       return 0;
+}
+
+#ifdef CONFIG_MODULES
+
+static int module_conflict(void *start, void *end)
+{
+       struct hlist_head *head;
+       struct hlist_node *node, *node_next, *module_node, *module_node_next;
+       struct jump_label_entry *e;
+       struct jump_label_module_entry *e_module;
+       struct jump_entry *iter;
+       int i, count;
+       int conflict = 0;
+
+       for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+               head = &jump_label_table[i];
+               hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+                       hlist_for_each_entry_safe(e_module, module_node,
+                                                       module_node_next,
+                                                       &(e->modules), hlist) {
+                               count = e_module->nr_entries;
+                               iter = e_module->table;
+                               while (count--) {
+                                       if (addr_conflict(iter, start, end)) {
+                                               conflict = 1;
+                                               goto out;
+                                       }
+                                       iter++;
+                               }
+                       }
+               }
+       }
+out:
+       return conflict;
+}
+
+#endif
+
+/***
+ * jump_label_text_reserved - check if addr range is reserved
+ * @start: start text addr
+ * @end: end text addr
+ *
+ * checks if the text addr located between @start and @end
+ * overlaps with any of the jump label patch addresses. Code
+ * that wants to modify kernel text should first verify that
+ * it does not overlap with any of the jump label addresses.
+ *
+ * returns 1 if there is an overlap, 0 otherwise
+ */
+int jump_label_text_reserved(void *start, void *end)
+{
+       struct jump_entry *iter;
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       int conflict = 0;
+
+       mutex_lock(&jump_label_mutex);
+       iter = iter_start;
+       while (iter < iter_stop) {
+               if (addr_conflict(iter, start, end)) {
+                       conflict = 1;
+                       goto out;
+               }
+               iter++;
+       }
+
+       /* now check modules */
+#ifdef CONFIG_MODULES
+       conflict = module_conflict(start, end);
+#endif
+out:
+       mutex_unlock(&jump_label_mutex);
+       return conflict;
+}
+
+static __init int init_jump_label(void)
+{
+       int ret;
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       struct jump_entry *iter;
+
+       mutex_lock(&jump_label_mutex);
+       ret = build_jump_label_hashtable(__start___jump_table,
+                                        __stop___jump_table);
+       iter = iter_start;
+       while (iter < iter_stop) {
+               arch_jump_label_text_poke_early(iter->code);
+               iter++;
+       }
+       mutex_unlock(&jump_label_mutex);
+       return ret;
+}
+early_initcall(init_jump_label);
+
+#ifdef CONFIG_MODULES
+
+static struct jump_label_module_entry *
+add_jump_label_module_entry(struct jump_label_entry *entry,
+                           struct jump_entry *iter_begin,
+                           int count, struct module *mod)
+{
+       struct jump_label_module_entry *e;
+
+       e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+       e->mod = mod;
+       e->nr_entries = count;
+       e->table = iter_begin;
+       hlist_add_head(&e->hlist, &entry->modules);
+       return e;
+}
+
+static int add_jump_label_module(struct module *mod)
+{
+       struct jump_entry *iter, *iter_begin;
+       struct jump_label_entry *entry;
+       struct jump_label_module_entry *module_entry;
+       int count;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return 0;
+
+       sort_jump_label_entries(mod->jump_entries,
+                               mod->jump_entries + mod->num_jump_entries);
+       iter = mod->jump_entries;
+       while (iter < mod->jump_entries + mod->num_jump_entries) {
+               entry = get_jump_label_entry(iter->key);
+               iter_begin = iter;
+               count = 0;
+               while ((iter < mod->jump_entries + mod->num_jump_entries) &&
+                       (iter->key == iter_begin->key)) {
+                               iter++;
+                               count++;
+               }
+               if (!entry) {
+                       entry = add_jump_label_entry(iter_begin->key, 0, NULL);
+                       if (IS_ERR(entry))
+                               return PTR_ERR(entry);
+               }
+               module_entry = add_jump_label_module_entry(entry, iter_begin,
+                                                          count, mod);
+               if (IS_ERR(module_entry))
+                       return PTR_ERR(module_entry);
+       }
+       return 0;
+}
+
+static void remove_jump_label_module(struct module *mod)
+{
+       struct hlist_head *head;
+       struct hlist_node *node, *node_next, *module_node, *module_node_next;
+       struct jump_label_entry *e;
+       struct jump_label_module_entry *e_module;
+       int i;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return;
+
+       for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+               head = &jump_label_table[i];
+               hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+                       hlist_for_each_entry_safe(e_module, module_node,
+                                                 module_node_next,
+                                                 &(e->modules), hlist) {
+                               if (e_module->mod == mod) {
+                                       hlist_del(&e_module->hlist);
+                                       kfree(e_module);
+                               }
+                       }
+                       if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
+                               hlist_del(&e->hlist);
+                               kfree(e);
+                       }
+               }
+       }
+}
+
+static int
+jump_label_module_notify(struct notifier_block *self, unsigned long val,
+                        void *data)
+{
+       struct module *mod = data;
+       int ret = 0;
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               mutex_lock(&jump_label_mutex);
+               ret = add_jump_label_module(mod);
+               if (ret)
+                       remove_jump_label_module(mod);
+               mutex_unlock(&jump_label_mutex);
+               break;
+       case MODULE_STATE_GOING:
+               mutex_lock(&jump_label_mutex);
+               remove_jump_label_module(mod);
+               mutex_unlock(&jump_label_mutex);
+               break;
+       }
+       return ret;
+}
+
+/***
+ * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
+ * @mod: module to patch
+ *
+ * Allow for run-time selection of the optimal nops. Before the module
+ * loads, patch these with arch_get_jump_label_nop(), which is specified by
+ * the arch-specific jump label code.
+ */
+void jump_label_apply_nops(struct module *mod)
+{
+       struct jump_entry *iter;
+
+       /* if the module doesn't have jump label entries, just return */
+       if (!mod->num_jump_entries)
+               return;
+
+       iter = mod->jump_entries;
+       while (iter < mod->jump_entries + mod->num_jump_entries) {
+               arch_jump_label_text_poke_early(iter->code);
+               iter++;
+       }
+}
+
+struct notifier_block jump_label_module_nb = {
+       .notifier_call = jump_label_module_notify,
+       .priority = 0,
+};
+
+static __init int init_jump_label_module(void)
+{
+       return register_module_notifier(&jump_label_module_nb);
+}
+early_initcall(init_jump_label_module);
+
+#endif /* CONFIG_MODULES */
+
+#endif
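
build_jump_label_hashtable() above relies on a sort-then-group pass: sort the entries by key, then walk runs of equal keys so each distinct key gets one table entry. A self-contained sketch of that pass, with qsort() standing in for the kernel's sort() and made-up data:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long key;
};

static int cmp_entry(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	if (ea->key < eb->key)
		return -1;
	if (ea->key > eb->key)
		return 1;
	return 0;
}

int main(void)
{
	struct entry table[] = { {3}, {1}, {3}, {2}, {1} };
	size_t n = sizeof(table) / sizeof(table[0]);
	size_t i = 0;

	qsort(table, n, sizeof(table[0]), cmp_entry);
	while (i < n) {
		size_t begin = i, count = 0;

		/* Consume the whole run of entries sharing this key. */
		while (i < n && table[i].key == table[begin].key) {
			i++;
			count++;
		}
		printf("key %lu: %zu entries\n", table[begin].key, count);
	}
	return 0;
}
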
index 6b5580c57644dc1804b02fd85648e7100d5d75dd..01a0700e873f53ca60084da3c0c1142bebf49b16 100644 (file)
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
        n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
        n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-       if (n)
-               sg_mark_end(sgl + n - 1);
        return n;
 }
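
For context on setup_sgl(): a region of a circular buffer is describable by at most two linear segments, the run up to the end of the buffer plus the wrapped remainder, which is why two setup_sgl_buf() calls suffice. A sketch of that split (assumes len <= size; the names are not the kfifo API):

#include <stdio.h>

struct seg {
	unsigned int off;
	unsigned int len;
};

/* Describe [off, off+len) modulo size as one or two linear segments. */
static int ring_segments(unsigned int size, unsigned int off,
			 unsigned int len, struct seg out[2])
{
	off %= size;
	if (off + len <= size) {
		out[0].off = off;
		out[0].len = len;
		return 1;
	}
	out[0].off = off;
	out[0].len = size - off;	/* tail up to the buffer end */
	out[1].off = 0;
	out[1].len = len - out[0].len;	/* wrapped head */
	return 2;
}

int main(void)
{
	struct seg s[2];
	int n = ring_segments(16, 12, 10, s), i;

	for (i = 0; i < n; i++)
		printf("segment %d: off=%u len=%u\n", i, s[i].off, s[i].len);
	return 0;
}
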
 
index 282035f3ae964e1e288f352c370be8edd11d3078..ec4210c6501e1b549d18c2a89fa3cbe8725da35b 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/jump_label.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -399,7 +400,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
        int i;
        struct kprobe *p = NULL;
@@ -831,6 +832,7 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 
 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;
@@ -842,6 +844,7 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
+__acquires(hlist_lock)
 {
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +852,7 @@ static void __kprobes kretprobe_table_lock(unsigned long hash,
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        unsigned long *flags)
+__releases(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;
@@ -857,7 +861,9 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+       unsigned long *flags)
+__releases(hlist_lock)
 {
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
@@ -1141,7 +1147,8 @@ int __kprobes register_kprobe(struct kprobe *p)
        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
-           ftrace_text_reserved(p->addr, p->addr)) {
+           ftrace_text_reserved(p->addr, p->addr) ||
+           jump_label_text_reserved(p->addr, p->addr)) {
                preempt_enable();
                return -EINVAL;
        }
@@ -1339,18 +1346,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
-               unsigned long addr;
+               unsigned long addr, offset;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);
 
-               if (!kernel_text_address(addr))
-                       ret = -EINVAL;
-               else {
-                       /* Todo: Verify probepoint is a function entry point */
+               /* Verify probepoint is a function entry point */
+               if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+                   offset == 0) {
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = register_kprobe(&jp->kp);
-               }
+               } else
+                       ret = -EINVAL;
+
                if (ret < 0) {
                        if (i > 0)
                                unregister_jprobes(jps, i);
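
The jprobe hunk replaces a bare kernel_text_address() check with "symbol offset must be zero", i.e. the probe point has to be a function entry. A loose userspace analogue, with dladdr() playing the role of kallsyms_lookup_size_offset(); build with something like cc -rdynamic demo.c -ldl so the symbol is visible:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

void probe_me(void)
{
}

/* An address qualifies only if it sits at offset 0 of its symbol. */
static int is_function_entry(void *addr)
{
	Dl_info info;

	if (!dladdr(addr, &info) || !info.dli_saddr)
		return 0;
	return info.dli_saddr == addr;
}

int main(void)
{
	/* Function-to-object pointer casts are fine on common toolchains. */
	printf("entry:   %d\n", is_function_entry((void *)probe_me));
	printf("entry+1: %d\n", is_function_entry((char *)probe_me + 1));
	return 0;
}
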
index f2852a5102327f74c39531a517e488c0d1e673e5..42ba65dff7d99e4eeadfe6175d70c2f9665ad431 100644 (file)
@@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        }
 #endif
 
+       if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+               debug_locks_off();
+               printk(KERN_ERR
+                       "BUG: looking up invalid subclass: %u\n", subclass);
+               printk(KERN_ERR
+                       "turning off the locking correctness validator.\n");
+               dump_stack();
+               return NULL;
+       }
+
        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself:
@@ -774,7 +784,9 @@ out_unlock_set:
        raw_local_irq_restore(flags);
 
        if (!subclass || force)
-               lock->class_cache = class;
+               lock->class_cache[0] = class;
+       else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               lock->class_cache[subclass] = class;
 
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;
@@ -2679,7 +2691,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       lock->class_cache = NULL;
+       int i;
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
 #endif
@@ -2739,21 +2755,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
 
-       if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
-               debug_locks_off();
-               printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
-               printk("turning off the locking correctness validator.\n");
-               dump_stack();
-               return 0;
-       }
-
        if (lock->key == &__lockdep_no_validate__)
                check = 1;
 
-       if (!subclass)
-               class = lock->class_cache;
+       if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               class = lock->class_cache[subclass];
        /*
-        * Not cached yet or subclass?
+        * Not cached?
         */
        if (unlikely(!class)) {
                class = register_lock_class(lock, subclass, 0);
@@ -2918,7 +2926,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                return 1;
 
        if (hlock->references) {
-               struct lock_class *class = lock->class_cache;
+               struct lock_class *class = lock->class_cache[0];
 
                if (!class)
                        class = look_up_lock_class(lock, 0);
@@ -3559,7 +3567,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                if (list_empty(head))
                        continue;
                list_for_each_entry_safe(class, next, head, hash_entry) {
-                       if (unlikely(class == lock->class_cache)) {
+                       int match = 0;
+
+                       for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+                               match |= class == lock->class_cache[j];
+
+                       if (unlikely(match)) {
                                if (debug_locks_off_graph_unlock())
                                        WARN_ON(1);
                                goto out_restore;
@@ -3775,7 +3788,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
  * Careful: only use this function if you are sure that
  * the task cannot run in parallel!
  */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
        if (unlikely(!debug_locks)) {
                printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3796,6 @@ void __debug_show_held_locks(struct task_struct *task)
        }
        lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-               __debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
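
The lockdep changes above turn the single class_cache into a small per-subclass array consulted before the slow lookup, with subclasses past the array bounds always taking the slow path. A minimal sketch of that bounded cache (NR_CACHING and the lookup function are stand-ins):

#include <stdio.h>

#define NR_CACHING 2

struct lock_class {
	unsigned int subclass;
};

static struct lock_class classes[8];	/* stand-in for the class table */

static struct lock_class *slow_lookup(unsigned int sub)
{
	classes[sub].subclass = sub;	/* pretend we registered it */
	return &classes[sub];
}

struct lock_map {
	struct lock_class *cache[NR_CACHING];
};

static struct lock_class *get_class(struct lock_map *m, unsigned int sub)
{
	struct lock_class *class = NULL;

	/* Fast path: only the first NR_CACHING subclasses are cached. */
	if (sub < NR_CACHING)
		class = m->cache[sub];
	if (!class) {
		class = slow_lookup(sub);
		if (sub < NR_CACHING)
			m->cache[sub] = class;
	}
	return class;
}

int main(void)
{
	struct lock_map m = { { NULL } };

	printf("subclass 0 -> %u\n", get_class(&m, 0)->subclass);
	printf("subclass 5 -> %u (never cached)\n", get_class(&m, 5)->subclass);
	return 0;
}
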
index d0b5f8db11b4a4183c229e2a44f433a99f58090a..2df46301a7a407dcde3435542e38f6944358d7c1 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
+#include <linux/jump_label.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
@@ -1537,6 +1538,7 @@ static int __unlink_module(void *_mod)
 {
        struct module *mod = _mod;
        list_del(&mod->list);
+       module_bug_cleanup(mod);
        return 0;
 }
 
@@ -2308,6 +2310,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
                                        sizeof(*mod->tracepoints),
                                        &mod->num_tracepoints);
 #endif
+#ifdef HAVE_JUMP_LABEL
+       mod->jump_entries = section_objs(info, "__jump_table",
+                                       sizeof(*mod->jump_entries),
+                                       &mod->num_jump_entries);
+#endif
 #ifdef CONFIG_EVENT_TRACING
        mod->trace_events = section_objs(info, "_ftrace_events",
                                         sizeof(*mod->trace_events),
@@ -2625,6 +2632,7 @@ static struct module *load_module(void __user *umod,
        if (err < 0)
                goto ddebug;
 
+       module_bug_finalize(info.hdr, info.sechdrs, mod);
        list_add_rcu(&mod->list, &modules);
        mutex_unlock(&module_mutex);
 
@@ -2650,6 +2658,8 @@ static struct module *load_module(void __user *umod,
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       module_bug_cleanup(mod);
+
  ddebug:
        if (!mod->taints)
                dynamic_debug_remove(info.debug);
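
The module.c hunks pair module_bug_finalize() with module_bug_cleanup() on both the unload path and the load-error path. The underlying idiom is the usual goto-based unwind, sketched here with placeholder steps:

#include <stdio.h>

static int  step_a(void) { puts("a: set up"); return 0; }
static void undo_a(void) { puts("a: undone"); }
static int  step_b(void) { puts("b: failed"); return -1; }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	undo_a();	/* unwind in reverse order of setup */
	return err;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}
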
index 4c0b7b3e6d2e9a483c6cb4cc384e979911ed03bb..200407c1502f509ee3f9d8a665bc4d3b78a27f74 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..f309e8014c7853105d1a38bc662f10164dc4d3d1 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
-#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
-/*
- * Each CPU has a list of per CPU events:
- */
-static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
-
-int perf_max_events __read_mostly = 1;
-static int perf_reserved_percpu __read_mostly;
-static int perf_overcommit __read_mostly = 1;
-
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
 
+static LIST_HEAD(pmus);
+static DEFINE_MUTEX(pmus_lock);
+static struct srcu_struct pmus_srcu;
+
 /*
  * perf event paranoia level:
  *  -1 - not paranoid at all
@@ -67,36 +61,43 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
-/*
- * Lock for (sysadmin-configurable) event reservations:
- */
-static DEFINE_SPINLOCK(perf_resource_lock);
+void __weak perf_event_print_debug(void)       { }
 
-/*
- * Architecture provided APIs - weak aliases:
- */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
+extern __weak const char *perf_pmu_name(void)
 {
-       return NULL;
+       return "pmu";
 }
 
-void __weak hw_perf_disable(void)              { barrier(); }
-void __weak hw_perf_enable(void)               { barrier(); }
-
-void __weak perf_event_print_debug(void)       { }
-
-static DEFINE_PER_CPU(int, perf_disable_count);
+void perf_pmu_disable(struct pmu *pmu)
+{
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!(*count)++)
+               pmu->pmu_disable(pmu);
+}
 
-void perf_disable(void)
+void perf_pmu_enable(struct pmu *pmu)
 {
-       if (!__get_cpu_var(perf_disable_count)++)
-               hw_perf_disable();
+       int *count = this_cpu_ptr(pmu->pmu_disable_count);
+       if (!--(*count))
+               pmu->pmu_enable(pmu);
 }
 
-void perf_enable(void)
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_pmu_rotate_start(struct pmu *pmu)
 {
-       if (!--__get_cpu_var(perf_disable_count))
-               hw_perf_enable();
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct list_head *head = &__get_cpu_var(rotation_list);
+
+       WARN_ON(!irqs_disabled());
+
+       if (list_empty(&cpuctx->rotation_list))
+               list_add(&cpuctx->rotation_list, head);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -151,13 +152,13 @@ static u64 primary_event_id(struct perf_event *event)
  * the context could get moved to another task.
  */
 static struct perf_event_context *
-perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
        struct perf_event_context *ctx;
 
        rcu_read_lock();
- retry:
-       ctx = rcu_dereference(task->perf_event_ctxp);
+retry:
+       ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
@@ -170,7 +171,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
                 * can't get swapped on us any more.
                 */
                raw_spin_lock_irqsave(&ctx->lock, *flags);
-               if (ctx != rcu_dereference(task->perf_event_ctxp)) {
+               if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }
@@ -189,12 +190,13 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
  * can't get swapped to another task.  This also increments its
  * reference count so that the context can't get freed.
  */
-static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *
+perf_pin_task_context(struct task_struct *task, int ctxn)
 {
        struct perf_event_context *ctx;
        unsigned long flags;
 
-       ctx = perf_lock_task_context(task, &flags);
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -302,6 +304,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
        }
 
        list_add_rcu(&event->event_entry, &ctx->event_list);
+       if (!ctx->nr_events)
+               perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
@@ -311,7 +315,12 @@ static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader;
 
-       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
+       /*
+        * We can have double attach due to group movement in perf_event_open.
+        */
+       if (event->attach_state & PERF_ATTACH_GROUP)
+               return;
+
        event->attach_state |= PERF_ATTACH_GROUP;
 
        if (group_leader == event)
@@ -402,21 +411,40 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
-static void
-event_sched_out(struct perf_event *event,
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
+static int
+__event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is returned
+        * via read() for time_enabled, time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+               return 0;
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        if (event->pending_disable) {
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = ctx->time;
-       event->pmu->disable(event);
+       event->pmu->del(event, 0);
        event->oncpu = -1;
 
        if (!is_software_event(event))
@@ -424,6 +452,19 @@ event_sched_out(struct perf_event *event,
        ctx->nr_active--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
+       return 1;
+}
+
+static void
+event_sched_out(struct perf_event *event,
+                 struct perf_cpu_context *cpuctx,
+                 struct perf_event_context *ctx)
+{
+       int ret;
+
+       ret = __event_sched_out(event, cpuctx, ctx);
+       if (ret)
+               event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -432,9 +473,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,10 +483,16 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
 
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -456,9 +501,9 @@ group_sched_out(struct perf_event *group_event,
  */
 static void __perf_event_remove_from_context(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a task context, we need to check whether it is
@@ -469,27 +514,11 @@ static void __perf_event_remove_from_context(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
 
        event_sched_out(event, cpuctx, ctx);
 
        list_del_event(event, ctx);
 
-       if (!ctx->task) {
-               /*
-                * Allow more per task events with respect to the
-                * reservation:
-                */
-               cpuctx->max_pertask =
-                       min(perf_max_events - ctx->nr_events,
-                           perf_max_events - perf_reserved_percpu);
-       }
-
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -554,8 +583,8 @@ retry:
 static void __perf_event_disable(void *info)
 {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a per-task event, need to check whether this
@@ -610,7 +639,7 @@ void perf_event_disable(struct perf_event *event)
                return;
        }
 
- retry:
+retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
 
        raw_spin_lock_irq(&ctx->lock);
@@ -635,7 +664,7 @@ void perf_event_disable(struct perf_event *event)
 }
 
 static int
-event_sched_in(struct perf_event *event,
+__event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
@@ -649,14 +678,12 @@ event_sched_in(struct perf_event *event,
         */
        smp_wmb();
 
-       if (event->pmu->enable(event)) {
+       if (event->pmu->add(event, PERF_EF_START)) {
                event->state = PERF_EVENT_STATE_INACTIVE;
                event->oncpu = -1;
                return -EAGAIN;
        }
 
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
-
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
@@ -667,28 +694,56 @@ event_sched_in(struct perf_event *event,
        return 0;
 }
 
+static inline int
+event_sched_in(struct perf_event *event,
+                struct perf_cpu_context *cpuctx,
+                struct perf_event_context *ctx)
+{
+       int ret = __event_sched_in(event, cpuctx, ctx);
+       if (ret)
+               return ret;
+       event->tstamp_running += ctx->time - event->tstamp_stopped;
+       return 0;
+}
+
+static void
+group_commit_event_sched_in(struct perf_event *group_event,
+              struct perf_cpu_context *cpuctx,
+              struct perf_event_context *ctx)
+{
+       struct perf_event *event;
+       u64 now = ctx->time;
+
+       group_event->tstamp_running += now - group_event->tstamp_stopped;
+       /*
+        * Schedule in siblings as one group (if any):
+        */
+       list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+               event->tstamp_running += now - event->tstamp_stopped;
+       }
+}
+
 static int
 group_sched_in(struct perf_event *group_event,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group = NULL;
-       const struct pmu *pmu = group_event->pmu;
-       bool txn = false;
+       struct pmu *pmu = group_event->pmu;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
 
-       /* Check if group transaction availabe */
-       if (pmu->start_txn)
-               txn = true;
-
-       if (txn)
-               pmu->start_txn(pmu);
+       pmu->start_txn(pmu);
 
-       if (event_sched_in(group_event, cpuctx, ctx)) {
-               if (txn)
-                       pmu->cancel_txn(pmu);
+       /*
+        * use __event_sched_in() to delay updating tstamp_running
+        * until the transaction is committed. In case of failure
+        * we will keep an unmodified tstamp_running which is a
+        * requirement to get correct timing information
+        */
+       if (__event_sched_in(group_event, cpuctx, ctx)) {
+               pmu->cancel_txn(pmu);
                return -EAGAIN;
        }
 
@@ -696,29 +751,33 @@ group_sched_in(struct perf_event *group_event,
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-               if (event_sched_in(event, cpuctx, ctx)) {
+               if (__event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
                }
        }
 
-       if (!txn || !pmu->commit_txn(pmu))
+       if (!pmu->commit_txn(pmu)) {
+               /* commit tstamp_running */
+               group_commit_event_sched_in(group_event, cpuctx, ctx);
                return 0;
-
+       }
 group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        *
+        * use __event_sched_out() to avoid updating tstamp_stopped
+        * because the event never actually ran
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
                        break;
-               event_sched_out(event, cpuctx, ctx);
+               __event_sched_out(event, cpuctx, ctx);
        }
-       event_sched_out(group_event, cpuctx, ctx);
+       __event_sched_out(group_event, cpuctx, ctx);
 
-       if (txn)
-               pmu->cancel_txn(pmu);
+       pmu->cancel_txn(pmu);
 
        return -EAGAIN;
 }
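
group_sched_in() now assumes every PMU provides the transaction hooks; presumably perf_pmu_register() installs no-op defaults when a driver leaves them NULL, which is why the old "txn" guard can be dropped. A minimal driver-side sketch of the contract, with hypothetical names (struct demo_cpuc, demo_schedule_events()) standing in for real driver state:

struct demo_cpuc { unsigned int flags; int n_txn; };
static DEFINE_PER_CPU(struct demo_cpuc, demo_cpuc);

static void demo_start_txn(struct pmu *pmu)
{
	struct demo_cpuc *cpuc = this_cpu_ptr(&demo_cpuc);

	cpuc->flags |= PERF_EVENT_TXN;	/* defer per-event schedulability checks */
	cpuc->n_txn = 0;
}

static int demo_commit_txn(struct pmu *pmu)
{
	struct demo_cpuc *cpuc = this_cpu_ptr(&demo_cpuc);

	if (demo_schedule_events(cpuc))	/* validate the whole group at once */
		return -EAGAIN;		/* group_sched_in() will call ->cancel_txn() */

	cpuc->flags &= ~PERF_EVENT_TXN;
	return 0;
}

static void demo_cancel_txn(struct pmu *pmu)
{
	struct demo_cpuc *cpuc = this_cpu_ptr(&demo_cpuc);

	cpuc->flags &= ~PERF_EVENT_TXN;	/* forget events ->add()ed since start_txn */
	cpuc->n_txn = 0;
}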
@@ -771,10 +830,10 @@ static void add_event_to_ctx(struct perf_event *event,
  */
 static void __perf_install_in_context(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
 
        /*
@@ -794,12 +853,6 @@ static void __perf_install_in_context(void *info)
        ctx->is_active = 1;
        update_context_time(ctx);
 
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
-
        add_event_to_ctx(event, ctx);
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -837,12 +890,7 @@ static void __perf_install_in_context(void *info)
                }
        }
 
-       if (!err && !ctx->task && cpuctx->max_pertask)
-               cpuctx->max_pertask--;
-
- unlock:
-       perf_enable();
-
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
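
Every call site that used the single global per-cpu perf_cpu_context now derives the cpu-context from the event's own context. __get_cpu_context() is not visible in these hunks; presumably it is the helper introduced earlier in the series, along the lines of:

static struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}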
 
@@ -865,6 +913,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 {
        struct task_struct *task = ctx->task;
 
+       event->ctx = ctx;
+
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
@@ -913,10 +963,12 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
 }
 
 /*
@@ -925,9 +977,9 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 static void __perf_event_enable(void *info)
 {
        struct perf_event *event = info;
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        int err;
 
        /*
@@ -961,12 +1013,10 @@ static void __perf_event_enable(void *info)
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
 
        if (err) {
@@ -982,7 +1032,7 @@ static void __perf_event_enable(void *info)
                }
        }
 
- unlock:
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1023,7 +1073,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
 
- retry:
+retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
 
@@ -1043,7 +1093,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
 
- out:
+out:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -1074,26 +1124,26 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        struct perf_event *event;
 
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
        update_context_time(ctx);
 
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
 
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
 
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-
- out_enable:
-       perf_enable();
- out:
+       }
+out:
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
 }
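
The global perf_disable()/perf_enable() pair is replaced throughout by per-PMU disabling. A sketch of the nesting-counter implementation this presumably relies on (introduced elsewhere in the series):

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!(*count)++)		/* only the outermost caller hits hardware */
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);

	if (!--(*count))
		pmu->pmu_enable(pmu);
}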
 
@@ -1191,34 +1241,25 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
        }
 }
 
-/*
- * Called from scheduler to remove the events of the current task,
- * with interrupts disabled.
- *
- * We stop each event and update the event value in event->count.
- *
- * This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * not restart the event.
- */
-void perf_event_task_sched_out(struct task_struct *task,
-                                struct task_struct *next)
+void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+                                 struct task_struct *next)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
+       struct perf_cpu_context *cpuctx;
        int do_switch = 1;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       if (likely(!ctx))
+               return;
 
-       if (likely(!ctx || !cpuctx->task_ctx))
+       cpuctx = __get_cpu_context(ctx);
+       if (!cpuctx->task_ctx)
                return;
 
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
-       next_ctx = next->perf_event_ctxp;
+       next_ctx = next->perf_event_ctxp[ctxn];
        if (parent && next_ctx &&
            rcu_dereference(next_ctx->parent_ctx) == parent) {
                /*
@@ -1237,8 +1278,8 @@ void perf_event_task_sched_out(struct task_struct *task,
                         * XXX do we need a memory barrier of sorts
                         * wrt to rcu_dereference() of perf_event_ctxp
                         */
-                       task->perf_event_ctxp = next_ctx;
-                       next->perf_event_ctxp = ctx;
+                       task->perf_event_ctxp[ctxn] = next_ctx;
+                       next->perf_event_ctxp[ctxn] = ctx;
                        ctx->task = next;
                        next_ctx->task = task;
                        do_switch = 0;
@@ -1256,10 +1297,35 @@ void perf_event_task_sched_out(struct task_struct *task,
        }
 }
 
+#define for_each_task_context_nr(ctxn)                                 \
+       for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
+
+/*
+ * Called from scheduler to remove the events of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each event and update the event value in event->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
+ */
+void __perf_event_task_sched_out(struct task_struct *task,
+                                struct task_struct *next)
+{
+       int ctxn;
+
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+       for_each_task_context_nr(ctxn)
+               perf_event_context_sched_out(task, ctxn, next);
+}
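+
+/*
+ * Note: perf_event_ctxp is now an array indexed by context class, so a
+ * task carries one context per class of PMU (hardware vs. software).
+ * The enum backing perf_nr_task_contexts is not in these hunks;
+ * presumably it reads:
+ *
+ *	enum perf_event_task_context {
+ *		perf_invalid_context = -1,	// task_ctx_nr of cpu-only PMUs
+ *		perf_hw_context = 0,
+ *		perf_sw_context,
+ *		perf_nr_task_contexts,
+ *	};
+ *
+ * with task_struct (assumed) gaining:
+ *
+ *	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ */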
+
 static void task_ctx_sched_out(struct perf_event_context *ctx,
                               enum event_type_t event_type)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        if (!cpuctx->task_ctx)
                return;
@@ -1271,14 +1337,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
        cpuctx->task_ctx = NULL;
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-       task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */
@@ -1332,9 +1390,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
 }
 
@@ -1350,8 +1409,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
        ctx->timestamp = perf_clock();
 
-       perf_disable();
-
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
@@ -1363,8 +1420,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
 
-       perf_enable();
- out:
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1376,43 +1432,28 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
        ctx_sched_in(ctx, cpuctx, event_type);
 }
 
-static void task_ctx_sched_in(struct task_struct *task,
+static void task_ctx_sched_in(struct perf_event_context *ctx,
                              enum event_type_t event_type)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_cpu_context *cpuctx;
 
-       if (likely(!ctx))
-               return;
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
+
        ctx_sched_in(ctx, cpuctx, event_type);
        cpuctx->task_ctx = ctx;
 }
-/*
- * Called from scheduler to add the events of the current task
- * with interrupts disabled.
- *
- * We restore the event value and then enable it.
- *
- * This does not protect us against NMI, but enable()
- * sets the enabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * keep the event running.
- */
-void perf_event_task_sched_in(struct task_struct *task)
-{
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = task->perf_event_ctxp;
 
-       if (likely(!ctx))
-               return;
+void perf_event_context_sched_in(struct perf_event_context *ctx)
+{
+       struct perf_cpu_context *cpuctx;
 
+       cpuctx = __get_cpu_context(ctx);
        if (cpuctx->task_ctx == ctx)
                return;
 
-       perf_disable();
-
+       perf_pmu_disable(ctx->pmu);
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1426,7 +1467,37 @@ void perf_event_task_sched_in(struct task_struct *task)
 
        cpuctx->task_ctx = ctx;
 
-       perf_enable();
+       /*
+        * Since these rotations are per-cpu, we need to ensure the
+        * cpu-context we got scheduled on is actually rotating.
+        */
+       perf_pmu_rotate_start(ctx->pmu);
+       perf_pmu_enable(ctx->pmu);
+}
+
+/*
+ * Called from scheduler to add the events of the current task
+ * with interrupts disabled.
+ *
+ * We restore the event value and then enable it.
+ *
+ * This does not protect us against NMI, but enable()
+ * sets the enabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * keep the event running.
+ */
+void __perf_event_task_sched_in(struct task_struct *task)
+{
+       struct perf_event_context *ctx;
+       int ctxn;
+
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (likely(!ctx))
+                       continue;
+
+               perf_event_context_sched_in(ctx);
+       }
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1506,22 +1577,6 @@ do {                                     \
        return div64_u64(dividend, divisor);
 }
 
-static void perf_event_stop(struct perf_event *event)
-{
-       if (!event->pmu->stop)
-               return event->pmu->disable(event);
-
-       return event->pmu->stop(event);
-}
-
-static int perf_event_start(struct perf_event *event)
-{
-       if (!event->pmu->start)
-               return event->pmu->enable(event);
-
-       return event->pmu->start(event);
-}
-
 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -1541,15 +1596,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
        hwc->sample_period = sample_period;
 
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
-               perf_event_stop(event);
+               event->pmu->stop(event, PERF_EF_UPDATE);
                local64_set(&hwc->period_left, 0);
-               perf_event_start(event);
-               perf_enable();
+               event->pmu->start(event, PERF_EF_RELOAD);
        }
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 {
        struct perf_event *event;
        struct hw_perf_event *hwc;
@@ -1574,23 +1627,19 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
-                       event->pmu->unthrottle(event);
-                       perf_enable();
+                       event->pmu->start(event, 0);
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
 
                if (delta > 0)
-                       perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
+                       perf_adjust_period(event, period, delta);
        }
        raw_spin_unlock(&ctx->lock);
 }
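
perf_ctx_adjust_freq() now scales its frequency estimate by the actual time between calls instead of assuming one tick. Worked example: with HZ=1000, TICK_NSEC is 1,000,000 ns; a cpu-context serviced only every 4 jiffies (jiffies_interval = 4, see perf_event_task_tick() below) is passed period = 4 * 1,000,000 = 4,000,000 ns, so the delta/period rate estimate stays correct even though the adjustment runs less often.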
@@ -1608,32 +1657,38 @@ static void rotate_ctx(struct perf_event_context *ctx)
        raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr)
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       int rotate = 0;
-
-       if (!atomic_read(&nr_events))
-               return;
+       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
+       struct perf_event_context *ctx = NULL;
+       int rotate = 0, remove = 1;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-       if (cpuctx->ctx.nr_events &&
-           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-               rotate = 1;
+       if (cpuctx->ctx.nr_events) {
+               remove = 0;
+               if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+                       rotate = 1;
+       }
 
-       ctx = curr->perf_event_ctxp;
-       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
-               rotate = 1;
+       ctx = cpuctx->task_ctx;
+       if (ctx && ctx->nr_events) {
+               remove = 0;
+               if (ctx->nr_events != ctx->nr_active)
+                       rotate = 1;
+       }
 
-       perf_ctx_adjust_freq(&cpuctx->ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       perf_ctx_adjust_freq(&cpuctx->ctx, interval);
        if (ctx)
-               perf_ctx_adjust_freq(ctx);
+               perf_ctx_adjust_freq(ctx, interval);
 
        if (!rotate)
-               return;
+               goto done;
 
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1644,8 +1699,27 @@ void perf_event_task_tick(struct task_struct *curr)
 
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
+               task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+
+done:
+       if (remove)
+               list_del_init(&cpuctx->rotation_list);
+
+       perf_pmu_enable(cpuctx->ctx.pmu);
+}
+
+void perf_event_task_tick(void)
+{
+       struct list_head *head = &__get_cpu_var(rotation_list);
+       struct perf_cpu_context *cpuctx, *tmp;
+
+       WARN_ON(!irqs_disabled());
+
+       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               if (cpuctx->jiffies_interval == 1 ||
+                               !(jiffies % cpuctx->jiffies_interval))
+                       perf_rotate_context(cpuctx);
+       }
 }
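
perf_pmu_rotate_start(), called from perf_event_context_sched_in() above, is what puts a cpu-context on the per-cpu rotation_list that perf_event_task_tick() walks; presumably along the lines of:

static DEFINE_PER_CPU(struct list_head, rotation_list);

static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());	/* serializes against perf_rotate_context() */

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}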
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -1667,20 +1741,18 @@ static int event_enable_on_exec(struct perf_event *event,
  * Enable all of a task's events that have been marked enable-on-exec.
  * This expects task == current.
  */
-static void perf_event_enable_on_exec(struct task_struct *task)
+static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 {
-       struct perf_event_context *ctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
        int ret;
 
        local_irq_save(flags);
-       ctx = task->perf_event_ctxp;
        if (!ctx || !ctx->nr_events)
                goto out;
 
-       __perf_event_task_sched_out(ctx);
+       task_ctx_sched_out(ctx, EVENT_ALL);
 
        raw_spin_lock(&ctx->lock);
 
@@ -1704,8 +1776,8 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
        raw_spin_unlock(&ctx->lock);
 
-       perf_event_task_sched_in(task);
- out:
+       perf_event_context_sched_in(ctx);
+out:
        local_irq_restore(flags);
 }
 
@@ -1714,9 +1786,9 @@ static void perf_event_enable_on_exec(struct task_struct *task)
  */
 static void __perf_event_read(void *info)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
        /*
         * If this is a task context, we need to check whether it is
@@ -1755,7 +1827,13 @@ static u64 perf_event_read(struct perf_event *event)
                unsigned long flags;
 
                raw_spin_lock_irqsave(&ctx->lock, flags);
-               update_context_time(ctx);
+               /*
+                * We may read while the context is not active (e.g.,
+                * the thread is blocked); in that case we cannot
+                * update the context time.
+                */
+               if (ctx->is_active)
+                       update_context_time(ctx);
                update_event_times(event);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
@@ -1764,57 +1842,258 @@ static u64 perf_event_read(struct perf_event *event)
 }
 
 /*
- * Initialize the perf_event context in a task_struct:
+ * Callchain support
  */
-static void
-__perf_event_init_context(struct perf_event_context *ctx,
-                           struct task_struct *task)
+
+struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
 {
-       raw_spin_lock_init(&ctx->lock);
-       mutex_init(&ctx->mutex);
-       INIT_LIST_HEAD(&ctx->pinned_groups);
-       INIT_LIST_HEAD(&ctx->flexible_groups);
-       INIT_LIST_HEAD(&ctx->event_list);
-       atomic_set(&ctx->refcount, 1);
-       ctx->task = task;
 }
 
-static struct perf_event_context *find_get_context(pid_t pid, int cpu)
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
 {
-       struct perf_event_context *ctx;
-       struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
-       unsigned long flags;
-       int err;
+}
 
-       if (pid == -1 && cpu != -1) {
-               /* Must be root to operate on a CPU event: */
-               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-                       return ERR_PTR(-EACCES);
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+       struct callchain_cpus_entries *entries;
+       int cpu;
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
 
-               /*
-                * We could be clever and allow to attach a event to an
-                * offline CPU and activate it when the CPU comes up, but
-                * that's for later.
-                */
-               if (!cpu_online(cpu))
-                       return ERR_PTR(-ENODEV);
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
 
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               ctx = &cpuctx->ctx;
-               get_ctx(ctx);
+       kfree(entries);
+}
 
-               return ctx;
+static void release_callchain_buffers(void)
+{
+       struct callchain_cpus_entries *entries;
+
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
+
+       /*
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
+        */
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
+
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
+       }
+
+       rcu_assign_pointer(callchain_cpus_entries, entries);
+
+       return 0;
+
+fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+
+       return -ENOMEM;
+}
+
+static int get_callchain_buffers(void)
+{
+       int err = 0;
+       int count;
+
+       mutex_lock(&callchain_mutex);
+
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+exit:
+       mutex_unlock(&callchain_mutex);
+
+       return err;
+}
+
+static void put_callchain_buffers(void)
+{
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+}
+
+static int get_recursion_context(int *recursion)
+{
+       int rctx;
+
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+
+       if (recursion[rctx])
+               return -1;
+
+       recursion[rctx]++;
+       barrier();
+
+       return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+       barrier();
+       recursion[rctx]--;
+}
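+
+/*
+ * get_recursion_context()/put_recursion_context() give each cpu four
+ * buffer slots, one per context level (task=0, softirq=1, irq=2, NMI=3),
+ * so a sample taken from an interrupt that itself hit perf code cannot
+ * scribble over a half-filled entry; the barrier()s keep the flag update
+ * ordered against the buffer accesses. Intended pairing, sketched
+ * (get_callchain_entry() below is the real user):
+ *
+ *	int rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ *	if (rctx < 0)
+ *		return;		// slot busy at this level: drop the sample
+ *	// ... use the per-cpu buffer slot for level 'rctx' ...
+ *	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+ */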
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+       int cpu;
+       struct callchain_cpus_entries *entries;
+
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+
+       cpu = smp_processor_id();
+
+       return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       int rctx;
+       struct perf_callchain_entry *entry;
+
+
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+
+       if (!entry)
+               goto exit_put;
+
+       entry->nr = 0;
+
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+
+exit_put:
+       put_callchain_entry(rctx);
+
+       return entry;
+}
+
+/*
+ * Initialize the perf_event context in a task_struct:
+ */
+static void __perf_event_init_context(struct perf_event_context *ctx)
+{
+       raw_spin_lock_init(&ctx->lock);
+       mutex_init(&ctx->mutex);
+       INIT_LIST_HEAD(&ctx->pinned_groups);
+       INIT_LIST_HEAD(&ctx->flexible_groups);
+       INIT_LIST_HEAD(&ctx->event_list);
+       atomic_set(&ctx->refcount, 1);
+}
+
+static struct perf_event_context *
+alloc_perf_context(struct pmu *pmu, struct task_struct *task)
+{
+       struct perf_event_context *ctx;
+
+       ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       __perf_event_init_context(ctx);
+       if (task) {
+               ctx->task = task;
+               get_task_struct(task);
        }
+       ctx->pmu = pmu;
+
+       return ctx;
+}
+
+static struct task_struct *
+find_lively_task_by_vpid(pid_t vpid)
+{
+       struct task_struct *task;
+       int err;
 
        rcu_read_lock();
-       if (!pid)
+       if (!vpid)
                task = current;
        else
-               task = find_task_by_vpid(pid);
+               task = find_task_by_vpid(vpid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
@@ -1834,36 +2113,78 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto errout;
 
- retry:
-       ctx = perf_lock_task_context(task, &flags);
+       return task;
+errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
+
+}
+
+static struct perf_event_context *
+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+{
+       struct perf_event_context *ctx;
+       struct perf_cpu_context *cpuctx;
+       unsigned long flags;
+       int ctxn, err;
+
+       if (!task && cpu != -1) {
+               /* Must be root to operate on a CPU event: */
+               if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+                       return ERR_PTR(-EACCES);
+
+               if (cpu < 0 || cpu >= nr_cpumask_bits)
+                       return ERR_PTR(-EINVAL);
+
+               /*
+        * We could be clever and allow attaching an event to an
+                * offline CPU and activate it when the CPU comes up, but
+                * that's for later.
+                */
+               if (!cpu_online(cpu))
+                       return ERR_PTR(-ENODEV);
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               ctx = &cpuctx->ctx;
+               get_ctx(ctx);
+
+               return ctx;
+       }
+
+       err = -EINVAL;
+       ctxn = pmu->task_ctx_nr;
+       if (ctxn < 0)
+               goto errout;
+
+retry:
+       ctx = perf_lock_task_context(task, ctxn, &flags);
        if (ctx) {
                unclone_ctx(ctx);
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
        if (!ctx) {
-               ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+               ctx = alloc_perf_context(pmu, task);
                err = -ENOMEM;
                if (!ctx)
                        goto errout;
-               __perf_event_init_context(ctx, task);
+
                get_ctx(ctx);
-               if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
+
+               if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
                        /*
                         * We raced with some other task; use
                         * the context they set.
                         */
+                       put_task_struct(task);
                        kfree(ctx);
                        goto retry;
                }
-               get_task_struct(task);
        }
 
-       put_task_struct(task);
        return ctx;
 
- errout:
-       put_task_struct(task);
+errout:
        return ERR_PTR(err);
 }
 
@@ -1880,21 +2201,23 @@ static void free_event_rcu(struct rcu_head *head)
        kfree(event);
 }
 
-static void perf_pending_sync(struct perf_event *event);
 static void perf_buffer_put(struct perf_buffer *buffer);
 
 static void free_event(struct perf_event *event)
 {
-       perf_pending_sync(event);
+       irq_work_sync(&event->pending);
 
        if (!event->parent) {
-               atomic_dec(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_dec(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
 
        if (event->buffer) {
@@ -1905,7 +2228,9 @@ static void free_event(struct perf_event *event)
        if (event->destroy)
                event->destroy(event);
 
-       put_ctx(event->ctx);
+       if (event->ctx)
+               put_ctx(event->ctx);
+
        call_rcu(&event->rcu_head, free_event_rcu);
 }
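
free_event() now undoes per-feature accounting: the jump-label count for task events and the callchain buffer refcount. Presumably the increment side sits in perf_event_alloc() (not in these hunks), roughly:

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
		/* ...plus the existing nr_mmap/comm/task counters... */
	}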
 
@@ -2184,15 +2509,13 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        struct perf_event_context *ctx = event->ctx;
-       unsigned long size;
        int ret = 0;
        u64 value;
 
        if (!event->attr.sample_period)
                return -EINVAL;
 
-       size = copy_from_user(&value, arg, sizeof(value));
-       if (size != sizeof(value))
+       if (copy_from_user(&value, arg, sizeof(value)))
                return -EFAULT;
 
        if (!value)
@@ -2326,6 +2649,9 @@ int perf_event_task_disable(void)
 
 static int perf_event_index(struct perf_event *event)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
 
@@ -2829,16 +3155,7 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
-/*
- * Pending wakeups
- *
- * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
- *
- * The NMI bit means we cannot possibly take locks. Therefore, maintain a
- * single linked list and use cmpxchg() to add entries lockless.
- */
-
-static void perf_pending_event(struct perf_pending_entry *entry)
+static void perf_pending_event(struct irq_work *entry)
 {
        struct perf_event *event = container_of(entry,
                        struct perf_event, pending);
@@ -2854,99 +3171,6 @@ static void perf_pending_event(struct perf_pending_entry *entry)
        }
 }
 
-#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
-
-static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
-       PENDING_TAIL,
-};
-
-static void perf_pending_queue(struct perf_pending_entry *entry,
-                              void (*func)(struct perf_pending_entry *))
-{
-       struct perf_pending_entry **head;
-
-       if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
-               return;
-
-       entry->func = func;
-
-       head = &get_cpu_var(perf_pending_head);
-
-       do {
-               entry->next = *head;
-       } while (cmpxchg(head, entry->next, entry) != entry->next);
-
-       set_perf_event_pending();
-
-       put_cpu_var(perf_pending_head);
-}
-
-static int __perf_pending_run(void)
-{
-       struct perf_pending_entry *list;
-       int nr = 0;
-
-       list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
-       while (list != PENDING_TAIL) {
-               void (*func)(struct perf_pending_entry *);
-               struct perf_pending_entry *entry = list;
-
-               list = list->next;
-
-               func = entry->func;
-               entry->next = NULL;
-               /*
-                * Ensure we observe the unqueue before we issue the wakeup,
-                * so that we won't be waiting forever.
-                * -- see perf_not_pending().
-                */
-               smp_wmb();
-
-               func(entry);
-               nr++;
-       }
-
-       return nr;
-}
-
-static inline int perf_not_pending(struct perf_event *event)
-{
-       /*
-        * If we flush on whatever cpu we run, there is a chance we don't
-        * need to wait.
-        */
-       get_cpu();
-       __perf_pending_run();
-       put_cpu();
-
-       /*
-        * Ensure we see the proper queue state before going to sleep
-        * so that we do not miss the wakeup. -- see perf_pending_handle()
-        */
-       smp_rmb();
-       return event->pending.next == NULL;
-}
-
-static void perf_pending_sync(struct perf_event *event)
-{
-       wait_event(event->waitq, perf_not_pending(event));
-}
-
-void perf_event_do_pending(void)
-{
-       __perf_pending_run();
-}
-
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       return NULL;
-}
-
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
@@ -2996,8 +3220,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 
        if (handle->nmi) {
                handle->event->pending_wakeup = 1;
-               perf_pending_queue(&handle->event->pending,
-                                  perf_pending_event);
+               irq_work_queue(&handle->event->pending);
        } else
                perf_event_wakeup(handle->event);
 }
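
The hand-rolled lockless perf_pending list is gone; event->pending is now a generic irq_work, so NMI-safe queueing and the wait-for-completion on teardown reduce to library calls. Assumed lifecycle (the init presumably lives in perf_event_alloc()):

	init_irq_work(&event->pending, perf_pending_event);	/* at allocation */

	irq_work_queue(&event->pending);	/* NMI-safe; replaces perf_pending_queue() */

	irq_work_sync(&event->pending);		/* on free; replaces perf_pending_sync() */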
@@ -3053,7 +3276,7 @@ again:
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
 
- out:
+out:
        preempt_enable();
 }
 
@@ -3441,14 +3664,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
        struct perf_output_handle handle;
        struct perf_event_header header;
 
+       /* protect the callchain buffers */
+       rcu_read_lock();
+
        perf_prepare_sample(&header, data, event, regs);
 
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
 
        perf_output_sample(&handle, &header, data, event);
 
        perf_output_end(&handle);
+
+exit:
+       rcu_read_unlock();
 }
 
 /*
@@ -3562,16 +3791,27 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 static void perf_event_task_event(struct perf_task_event *task_event)
 {
        struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx = task_event->task_ctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_task_ctx(&cpuctx->ctx, task_event);
-       if (!ctx)
-               ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_task_ctx(ctx, task_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+               ctx = task_event->task_ctx;
+               if (!ctx) {
+                       ctxn = pmu->task_ctx_nr;
+                       if (ctxn < 0)
+                               goto next;
+                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               }
+               if (ctx)
+                       perf_event_task_ctx(ctx, task_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 }
 
@@ -3676,8 +3916,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       unsigned int size;
        char comm[TASK_COMM_LEN];
+       unsigned int size;
+       struct pmu *pmu;
+       int ctxn;
 
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -3689,21 +3931,36 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_comm_ctx(ctx, comm_event);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_comm_ctx(ctx, comm_event);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 }
 
 void perf_event_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
+       struct perf_event_context *ctx;
+       int ctxn;
+
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
 
-       if (task->perf_event_ctxp)
-               perf_event_enable_on_exec(task);
+               perf_event_enable_on_exec(ctx);
+       }
 
        if (!atomic_read(&nr_comm_events))
                return;
@@ -3805,6 +4062,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char tmp[16];
        char *buf = NULL;
        const char *name;
+       struct pmu *pmu;
+       int ctxn;
 
        memset(tmp, 0, sizeof(tmp));
 
@@ -3857,12 +4116,23 @@ got_name:
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
        rcu_read_lock();
-       cpuctx = &get_cpu_var(perf_cpu_context);
-       perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       ctx = rcu_dereference(current->perf_event_ctxp);
-       if (ctx)
-               perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
-       put_cpu_var(perf_cpu_context);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx) {
+                       perf_event_mmap_ctx(ctx, mmap_event,
+                                       vma->vm_flags & VM_EXEC);
+               }
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
        rcu_read_unlock();
 
        kfree(buf);
@@ -3944,8 +4214,6 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
 
-       throttle = (throttle && event->pmu->unthrottle != NULL);
-
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@ -3988,8 +4256,7 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
                event->pending_kill = POLL_HUP;
                if (nmi) {
                        event->pending_disable = 1;
-                       perf_pending_queue(&event->pending,
-                                          perf_pending_event);
+                       irq_work_queue(&event->pending);
                } else
                        perf_event_disable(event);
        }
@@ -4013,6 +4280,17 @@ int perf_event_overflow(struct perf_event *event, int nmi,
  * Generic software event infrastructure
  */
 
+struct swevent_htable {
+       struct swevent_hlist            *swevent_hlist;
+       struct mutex                    hlist_mutex;
+       int                             hlist_refcount;
+
+       /* Recursion avoidance in each contexts */
+       int                             recursion[PERF_NR_CONTEXTS];
+};
+
+static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+
 /*
  * We directly increment event->count and keep a second value in
  * event->hw.period_left to count intervals. This period event
@@ -4070,7 +4348,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
        }
 }
 
-static void perf_swevent_add(struct perf_event *event, u64 nr,
+static void perf_swevent_event(struct perf_event *event, u64 nr,
                               int nmi, struct perf_sample_data *data,
                               struct pt_regs *regs)
 {
@@ -4096,6 +4374,9 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 static int perf_exclude_event(struct perf_event *event,
                              struct pt_regs *regs)
 {
+       if (event->hw.state & PERF_HES_STOPPED)
+               return 0;
+
        if (regs) {
                if (event->attr.exclude_user && user_mode(regs))
                        return 1;
@@ -4142,11 +4423,11 @@ __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
 
 /* For the read side: events when they trigger */
 static inline struct hlist_head *
-find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
 {
        struct swevent_hlist *hlist;
 
-       hlist = rcu_dereference(ctx->swevent_hlist);
+       hlist = rcu_dereference(swhash->swevent_hlist);
        if (!hlist)
                return NULL;
 
@@ -4155,7 +4436,7 @@ find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
 
 /* For the event head insertion and removal in the hlist */
 static inline struct hlist_head *
-find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
 {
        struct swevent_hlist *hlist;
        u32 event_id = event->attr.config;
@@ -4166,7 +4447,7 @@ find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
         * and release. Which makes the protected version suitable here.
         * The context lock guarantees that.
         */
-       hlist = rcu_dereference_protected(ctx->swevent_hlist,
+       hlist = rcu_dereference_protected(swhash->swevent_hlist,
                                          lockdep_is_held(&event->ctx->lock));
        if (!hlist)
                return NULL;
@@ -4179,23 +4460,19 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
-       struct perf_cpu_context *cpuctx;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct perf_event *event;
        struct hlist_node *node;
        struct hlist_head *head;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-
        rcu_read_lock();
-
-       head = find_swevent_head_rcu(cpuctx, type, event_id);
-
+       head = find_swevent_head_rcu(swhash, type, event_id);
        if (!head)
                goto end;
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
-                       perf_swevent_add(event, nr, nmi, data, regs);
+                       perf_swevent_event(event, nr, nmi, data, regs);
        }
 end:
        rcu_read_unlock();
@@ -4203,33 +4480,17 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
-
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-
-       if (cpuctx->recursion[rctx])
-               return -1;
-
-       cpuctx->recursion[rctx]++;
-       barrier();
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
-       return rctx;
+       return get_recursion_context(swhash->recursion);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 void inline perf_swevent_put_recursion_context(int rctx)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+
+       put_recursion_context(swhash->recursion, rctx);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4255,20 +4516,20 @@ static void perf_swevent_read(struct perf_event *event)
 {
 }
 
-static int perf_swevent_enable(struct perf_event *event)
+static int perf_swevent_add(struct perf_event *event, int flags)
 {
+       struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct hw_perf_event *hwc = &event->hw;
-       struct perf_cpu_context *cpuctx;
        struct hlist_head *head;
 
-       cpuctx = &__get_cpu_var(perf_cpu_context);
-
        if (hwc->sample_period) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
 
-       head = find_swevent_head(cpuctx, event);
+       hwc->state = !(flags & PERF_EF_START);
+
+       head = find_swevent_head(swhash, event);
        if (WARN_ON_ONCE(!head))
                return -EINVAL;
 
@@ -4277,202 +4538,27 @@ static int perf_swevent_enable(struct perf_event *event)
        return 0;
 }
 
-static void perf_swevent_disable(struct perf_event *event)
+static void perf_swevent_del(struct perf_event *event, int flags)
 {
        hlist_del_rcu(&event->hlist_entry);
 }
 
-static void perf_swevent_void(struct perf_event *event)
-{
-}
-
-static int perf_swevent_int(struct perf_event *event)
-{
-       return 0;
-}
-
-static const struct pmu perf_ops_generic = {
-       .enable         = perf_swevent_enable,
-       .disable        = perf_swevent_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
-       .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
-};
-
-/*
- * hrtimer based swevent callback
- */
-
-static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
-{
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct pt_regs *regs;
-       struct perf_event *event;
-       u64 period;
-
-       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-       event->pmu->read(event);
-
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
-       regs = get_irq_regs();
-
-       if (regs && !perf_exclude_event(event, regs)) {
-               if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
-                               ret = HRTIMER_NORESTART;
-       }
-
-       period = max_t(u64, 10000, event->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-
-       return ret;
-}
-
-static void perf_swevent_start_hrtimer(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               u64 period;
-
-               if (hwc->remaining) {
-                       if (hwc->remaining < 0)
-                               period = 10000;
-                       else
-                               period = hwc->remaining;
-                       hwc->remaining = 0;
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL, 0);
-       }
-}
-
-static void perf_swevent_cancel_hrtimer(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (hwc->sample_period) {
-               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
-               hwc->remaining = ktime_to_ns(remaining);
-
-               hrtimer_cancel(&hwc->hrtimer);
-       }
-}
-
-/*
- * Software event: cpu wall time clock
- */
-
-static void cpu_clock_perf_event_update(struct perf_event *event)
-{
-       int cpu = raw_smp_processor_id();
-       s64 prev;
-       u64 now;
-
-       now = cpu_clock(cpu);
-       prev = local64_xchg(&event->hw.prev_count, now);
-       local64_add(now - prev, &event->count);
-}
-
-static int cpu_clock_perf_event_enable(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       int cpu = raw_smp_processor_id();
-
-       local64_set(&hwc->prev_count, cpu_clock(cpu));
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void cpu_clock_perf_event_disable(struct perf_event *event)
-{
-       perf_swevent_cancel_hrtimer(event);
-       cpu_clock_perf_event_update(event);
-}
-
-static void cpu_clock_perf_event_read(struct perf_event *event)
-{
-       cpu_clock_perf_event_update(event);
-}
-
-static const struct pmu perf_ops_cpu_clock = {
-       .enable         = cpu_clock_perf_event_enable,
-       .disable        = cpu_clock_perf_event_disable,
-       .read           = cpu_clock_perf_event_read,
-};
-
-/*
- * Software event: task time clock
- */
-
-static void task_clock_perf_event_update(struct perf_event *event, u64 now)
-{
-       u64 prev;
-       s64 delta;
-
-       prev = local64_xchg(&event->hw.prev_count, now);
-       delta = now - prev;
-       local64_add(delta, &event->count);
-}
-
-static int task_clock_perf_event_enable(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       u64 now;
-
-       now = event->ctx->time;
-
-       local64_set(&hwc->prev_count, now);
-
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void task_clock_perf_event_disable(struct perf_event *event)
+static void perf_swevent_start(struct perf_event *event, int flags)
 {
-       perf_swevent_cancel_hrtimer(event);
-       task_clock_perf_event_update(event, event->ctx->time);
-
+       event->hw.state = 0;
 }
 
-static void task_clock_perf_event_read(struct perf_event *event)
+static void perf_swevent_stop(struct perf_event *event, int flags)
 {
-       u64 time;
-
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
-
-       task_clock_perf_event_update(event, time);
+       event->hw.state = PERF_HES_STOPPED;
 }
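
perf_swevent_{add,del,start,stop} implement the new split between being programmed (->add()/->del()) and actually counting (->start()/->stop()). A summary sketch of the flag contract, as exercised by the core code above (assumed semantics, inferred from the call sites):

/*
 * ->add(event, PERF_EF_START)		program and start counting
 * ->add(event, 0)			program only; hwc->state = PERF_HES_STOPPED
 * ->start(event, 0)			start counting; used e.g. to unthrottle
 * ->start(event, PERF_EF_RELOAD)	reprogram period_left, then start
 * ->stop(event, PERF_EF_UPDATE)	stop and fold the hw count into event->count
 * ->del(event, 0)			unprogram; stops the event if still running
 */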
 
-static const struct pmu perf_ops_task_clock = {
-       .enable         = task_clock_perf_event_enable,
-       .disable        = task_clock_perf_event_disable,
-       .read           = task_clock_perf_event_read,
-};
-
 /* Deref the hlist from the update side */
 static inline struct swevent_hlist *
-swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+swevent_hlist_deref(struct swevent_htable *swhash)
 {
-       return rcu_dereference_protected(cpuctx->swevent_hlist,
-                                        lockdep_is_held(&cpuctx->hlist_mutex));
+       return rcu_dereference_protected(swhash->swevent_hlist,
+                                        lockdep_is_held(&swhash->hlist_mutex));
 }
 
 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
@@ -4483,27 +4569,27 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
        kfree(hlist);
 }
 
-static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+static void swevent_hlist_release(struct swevent_htable *swhash)
 {
-       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
+       struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
 
        if (!hlist)
                return;
 
-       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+       rcu_assign_pointer(swhash->swevent_hlist, NULL);
        call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
 }
 
 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
 
-       if (!--cpuctx->hlist_refcount)
-               swevent_hlist_release(cpuctx);
+       if (!--swhash->hlist_refcount)
+               swevent_hlist_release(swhash);
 
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
 }
 
 static void swevent_hlist_put(struct perf_event *event)
@@ -4521,12 +4607,12 @@ static void swevent_hlist_put(struct perf_event *event)
 
 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
        int err = 0;
 
-       mutex_lock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
 
-       if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
+       if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
                struct swevent_hlist *hlist;
 
                hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -4534,11 +4620,11 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
                        err = -ENOMEM;
                        goto exit;
                }
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       cpuctx->hlist_refcount++;
- exit:
-       mutex_unlock(&cpuctx->hlist_mutex);
+       swhash->hlist_refcount++;
+exit:
+       mutex_unlock(&swhash->hlist_mutex);
 
        return err;
 }
@@ -4562,7 +4648,7 @@ static int swevent_hlist_get(struct perf_event *event)
        put_online_cpus();
 
        return 0;
- fail:
+fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
@@ -4573,17 +4659,64 @@ static int swevent_hlist_get(struct perf_event *event)
        return err;
 }
 
-#ifdef CONFIG_EVENT_TRACING
+atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_event_destroy(struct perf_event *event)
+{
+       u64 event_id = event->attr.config;
+
+       WARN_ON(event->parent);
+
+       jump_label_dec(&perf_swevent_enabled[event_id]);
+       swevent_hlist_put(event);
+}
 
-static const struct pmu perf_ops_tracepoint = {
-       .enable         = perf_trace_enable,
-       .disable        = perf_trace_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
+static int perf_swevent_init(struct perf_event *event)
+{
+       int event_id = event->attr.config;
+
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       switch (event_id) {
+       case PERF_COUNT_SW_CPU_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
+               return -ENOENT;
+
+       default:
+               break;
+       }
+
+       if (event_id >= PERF_COUNT_SW_MAX)
+               return -ENOENT;
+
+       if (!event->parent) {
+               int err;
+
+               err = swevent_hlist_get(event);
+               if (err)
+                       return err;
+
+               jump_label_inc(&perf_swevent_enabled[event_id]);
+               event->destroy = sw_perf_event_destroy;
+       }
+
+       return 0;
+}
+
+static struct pmu perf_swevent = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = perf_swevent_init,
+       .add            = perf_swevent_add,
+       .del            = perf_swevent_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void,
 };
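
The struct pmu above replaces the old enable/disable pair with the finer-grained event_init/add/del/start/stop callbacks. A minimal userspace sketch (not part of the patch; it assumes only the stable perf_event_open() ABI) shows what perf_swevent_init() now accepts: generic software counters are claimed here, while the two clock events fall through to their own pmus further below.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;          /* routed to perf_swevent_init() */
            attr.config = PERF_COUNT_SW_PAGE_FAULTS; /* CPU/TASK clock would be refused
                                                        here and claimed by their own pmus */

            fd = perf_event_open(&attr, 0, -1, -1, 0); /* current task, any cpu */
            if (fd < 0)
                    return 1;

            /* ... workload under measurement ... */

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("page faults: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }
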
 
+#ifdef CONFIG_EVENT_TRACING
+
 static int perf_tp_filter_match(struct perf_event *event,
                                struct perf_sample_data *data)
 {
@@ -4627,7 +4760,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
-                       perf_swevent_add(event, count, 1, &data, regs);
+                       perf_swevent_event(event, count, 1, &data, regs);
        }
 
        perf_swevent_put_recursion_context(rctx);
@@ -4639,10 +4772,13 @@ static void tp_perf_event_destroy(struct perf_event *event)
        perf_trace_destroy(event);
 }
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static int perf_tp_event_init(struct perf_event *event)
 {
        int err;
 
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -ENOENT;
+
        /*
        * Raw tracepoint data is a severe data leak; only allow root to
        * have these.
@@ -4650,15 +4786,31 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
                        perf_paranoid_tracepoint_raw() &&
                        !capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
+               return -EPERM;
 
        err = perf_trace_init(event);
        if (err)
-               return NULL;
+               return err;
 
        event->destroy = tp_perf_event_destroy;
 
-       return &perf_ops_tracepoint;
+       return 0;
+}
+
+static struct pmu perf_tracepoint = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = perf_tp_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+};
+
+static inline void perf_tp_register(void)
+{
+       perf_pmu_register(&perf_tracepoint);
 }
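
perf_tp_event_init() follows the same claim-or-ENOENT pattern for PERF_TYPE_TRACEPOINT and keeps the CAP_SYS_ADMIN gate on raw samples. A hedged fragment, reusing the perf_event_open() wrapper from the sketch above (the debugfs mount point and tracepoint_id variable are assumptions):

    /* attr.config is the tracepoint id, readable from debugfs, e.g.
     * /sys/kernel/debug/tracing/events/sched/sched_switch/id */
    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_TRACEPOINT;   /* claimed by perf_tp_event_init() */
    attr.config = tracepoint_id;        /* hypothetical: parsed from the id file */
    attr.sample_period = 1;
    /* Requesting PERF_SAMPLE_RAW here would additionally require
     * CAP_SYS_ADMIN, per the check in perf_tp_event_init(). */
    fd = perf_event_open(&attr, 0, -1, -1, 0);
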
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4686,9 +4838,8 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #else
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static inline void perf_tp_register(void)
 {
-       return NULL;
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4703,24 +4854,6 @@ static void perf_event_free_filter(struct perf_event *event)
 #endif /* CONFIG_EVENT_TRACING */
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-static void bp_perf_event_destroy(struct perf_event *event)
-{
-       release_bp_slot(event);
-}
-
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
-       int err;
-
-       err = register_perf_hw_breakpoint(bp);
-       if (err)
-               return ERR_PTR(err);
-
-       bp->destroy = bp_perf_event_destroy;
-
-       return &perf_ops_bp;
-}
-
 void perf_bp_event(struct perf_event *bp, void *data)
 {
        struct perf_sample_data sample;
@@ -4728,81 +4861,383 @@ void perf_bp_event(struct perf_event *bp, void *data)
 
        perf_sample_data_init(&sample, bp->attr.bp_addr);
 
-       if (!perf_exclude_event(bp, regs))
-               perf_swevent_add(bp, 1, 1, &sample, regs);
-}
-#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
-       return NULL;
-}
-
-void perf_bp_event(struct perf_event *bp, void *regs)
-{
+       if (!bp->hw.state && !perf_exclude_event(bp, regs))
+               perf_swevent_event(bp, 1, 1, &sample, regs);
 }
 #endif
 
-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+/*
+ * hrtimer based swevent callback
+ */
 
-static void sw_perf_event_destroy(struct perf_event *event)
+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 {
-       u64 event_id = event->attr.config;
-
-       WARN_ON(event->parent);
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       struct perf_event *event;
+       u64 period;
 
-       atomic_dec(&perf_swevent_enabled[event_id]);
-       swevent_hlist_put(event);
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event->pmu->read(event);
+
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs = get_irq_regs();
+
+       if (regs && !perf_exclude_event(event, regs)) {
+               if (!(event->attr.exclude_idle && current->pid == 0))
+                       if (perf_event_overflow(event, 0, &data, regs))
+                               ret = HRTIMER_NORESTART;
+       }
+
+       period = max_t(u64, 10000, event->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+       return ret;
 }
 
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static void perf_swevent_start_hrtimer(struct perf_event *event)
 {
-       const struct pmu *pmu = NULL;
-       u64 event_id = event->attr.config;
+       struct hw_perf_event *hwc = &event->hw;
+
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+       if (hwc->sample_period) {
+               s64 period = local64_read(&hwc->period_left);
+
+               if (period) {
+                       if (period < 0)
+                               period = 10000;
 
+                       local64_set(&hwc->period_left, 0);
+               } else {
+                       period = max_t(u64, 10000, hwc->sample_period);
+               }
+               __hrtimer_start_range_ns(&hwc->hrtimer,
+                               ns_to_ktime(period), 0,
+                               HRTIMER_MODE_REL_PINNED, 0);
+       }
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->sample_period) {
+               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+               local64_set(&hwc->period_left, ktime_to_ns(remaining));
+
+               hrtimer_cancel(&hwc->hrtimer);
+       }
+}
+
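
Both the hrtimer callback and the start path clamp the period to 10000ns, so software sampling never fires faster than every 10us, and a stopped event resumes with whatever slice was left in period_left. A small userspace restatement of that selection (function name mine):

    #include <stdint.h>

    /* Restates the period choice in perf_swevent_start_hrtimer(): a 1us
     * request still fires at 10us intervals. */
    static uint64_t effective_period(int64_t period_left, uint64_t sample_period)
    {
            if (period_left < 0)            /* overdue: re-arm at the floor */
                    return 10000;
            if (period_left > 0)            /* resume the remaining slice as-is */
                    return period_left;
            return sample_period < 10000 ? 10000 : sample_period;
    }
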
+/*
+ * Software event: cpu wall time clock
+ */
+
+static void cpu_clock_event_update(struct perf_event *event)
+{
+       s64 prev;
+       u64 now;
+
+       now = local_clock();
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
+}
+
+static void cpu_clock_event_start(struct perf_event *event, int flags)
+{
+       local64_set(&event->hw.prev_count, local_clock());
+       perf_swevent_start_hrtimer(event);
+}
+
+static void cpu_clock_event_stop(struct perf_event *event, int flags)
+{
+       perf_swevent_cancel_hrtimer(event);
+       cpu_clock_event_update(event);
+}
+
+static int cpu_clock_event_add(struct perf_event *event, int flags)
+{
+       if (flags & PERF_EF_START)
+               cpu_clock_event_start(event, flags);
+
+       return 0;
+}
+
+static void cpu_clock_event_del(struct perf_event *event, int flags)
+{
+       cpu_clock_event_stop(event, flags);
+}
+
+static void cpu_clock_event_read(struct perf_event *event)
+{
+       cpu_clock_event_update(event);
+}
+
+static int cpu_clock_event_init(struct perf_event *event)
+{
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pmu perf_cpu_clock = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = cpu_clock_event_init,
+       .add            = cpu_clock_event_add,
+       .del            = cpu_clock_event_del,
+       .start          = cpu_clock_event_start,
+       .stop           = cpu_clock_event_stop,
+       .read           = cpu_clock_event_read,
+};
+
+/*
+ * Software event: task time clock
+ */
+
+static void task_clock_event_update(struct perf_event *event, u64 now)
+{
+       u64 prev;
+       s64 delta;
+
+       prev = local64_xchg(&event->hw.prev_count, now);
+       delta = now - prev;
+       local64_add(delta, &event->count);
+}
+
+static void task_clock_event_start(struct perf_event *event, int flags)
+{
+       local64_set(&event->hw.prev_count, event->ctx->time);
+       perf_swevent_start_hrtimer(event);
+}
+
+static void task_clock_event_stop(struct perf_event *event, int flags)
+{
+       perf_swevent_cancel_hrtimer(event);
+       task_clock_event_update(event, event->ctx->time);
+}
+
+static int task_clock_event_add(struct perf_event *event, int flags)
+{
+       if (flags & PERF_EF_START)
+               task_clock_event_start(event, flags);
+
+       return 0;
+}
+
+static void task_clock_event_del(struct perf_event *event, int flags)
+{
+       task_clock_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void task_clock_event_read(struct perf_event *event)
+{
+       u64 time;
+
+       if (!in_nmi()) {
+               update_context_time(event->ctx);
+               time = event->ctx->time;
+       } else {
+               u64 now = perf_clock();
+               u64 delta = now - event->ctx->timestamp;
+               time = event->ctx->time + delta;
+       }
+
+       task_clock_event_update(event, time);
+}
+
+static int task_clock_event_init(struct perf_event *event)
+{
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pmu perf_task_clock = {
+       .task_ctx_nr    = perf_sw_context,
+
+       .event_init     = task_clock_event_init,
+       .add            = task_clock_event_add,
+       .del            = task_clock_event_del,
+       .start          = task_clock_event_start,
+       .stop           = task_clock_event_stop,
+       .read           = task_clock_event_read,
+};
+
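
With the rework the two clock events become standalone pmus: each event_init claims exactly its own config and returns -ENOENT for everything else, so the core keeps probing. From userspace nothing changes; a fragment reusing the wrapper from the first sketch:

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_SOFTWARE;

    attr.config = PERF_COUNT_SW_TASK_CLOCK;    /* claimed by task_clock_event_init() */
    fd = perf_event_open(&attr, 0, -1, -1, 0); /* ns this task spends running */

    attr.config = PERF_COUNT_SW_CPU_CLOCK;     /* claimed by cpu_clock_event_init() */
    fd = perf_event_open(&attr, -1, 0, -1, 0); /* wall clock on cpu 0; per-cpu opens
                                                  may need elevated privileges */
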
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+       return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+       perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+       perf_pmu_enable(pmu);
+       return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+       perf_pmu_enable(pmu);
+}
+
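
The stubs above give every pmu a transaction interface: if the pmu exposes pmu_enable/pmu_disable, start/commit/cancel bracket group insertion with a disable/enable pair so hardware register writes can be batched; otherwise they collapse to no-ops. A hedged, simplified restatement of how the core drives this, modeled on the group scheduling path elsewhere in this file (error handling trimmed; not compilable outside kernel context):

    static int group_add_sketch(struct pmu *pmu, struct perf_event **ev, int n)
    {
            int i;

            pmu->start_txn(pmu);            /* perf_pmu_start_txn -> pmu_disable() */
            for (i = 0; i < n; i++) {
                    if (ev[i]->pmu->add(ev[i], PERF_EF_START))
                            goto fail;
            }
            if (!pmu->commit_txn(pmu))      /* perf_pmu_commit_txn -> pmu_enable() */
                    return 0;
    fail:
            while (i--)                     /* unwind the events already added */
                    ev[i]->pmu->del(ev[i], 0);
            pmu->cancel_txn(pmu);           /* perf_pmu_cancel_txn -> pmu_enable() */
            return -EAGAIN;
    }
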
+/*
+ * Ensures all contexts with the same task_ctx_nr have the same
+ * pmu_cpu_context too.
+ */
+static void *find_pmu_context(int ctxn)
+{
+       struct pmu *pmu;
+
+       if (ctxn < 0)
+               return NULL;
+
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->task_ctx_nr == ctxn)
+                       return pmu->pmu_cpu_context;
+       }
+
+       return NULL;
+}
+
+static void free_pmu_context(void * __percpu cpu_context)
+{
+       struct pmu *pmu;
+
+       mutex_lock(&pmus_lock);
        /*
-        * Software events (currently) can't in general distinguish
-        * between user, kernel and hypervisor events.
-        * However, context switches and cpu migrations are considered
-        * to be kernel events, and page faults are never hypervisor
-        * events.
+        * A poor man's refcount: free the context only when no pmu on the
+        * list still points at it.
         */
-       switch (event_id) {
-       case PERF_COUNT_SW_CPU_CLOCK:
-               pmu = &perf_ops_cpu_clock;
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (pmu->pmu_cpu_context == cpu_context)
+                       goto out;
+       }
 
-               break;
-       case PERF_COUNT_SW_TASK_CLOCK:
-               /*
-                * If the user instantiates this as a per-cpu event,
-                * use the cpu_clock event instead.
-                */
-               if (event->ctx->task)
-                       pmu = &perf_ops_task_clock;
-               else
-                       pmu = &perf_ops_cpu_clock;
+       free_percpu(cpu_context);
+out:
+       mutex_unlock(&pmus_lock);
+}
 
-               break;
-       case PERF_COUNT_SW_PAGE_FAULTS:
-       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
-       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_SW_CONTEXT_SWITCHES:
-       case PERF_COUNT_SW_CPU_MIGRATIONS:
-       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
-       case PERF_COUNT_SW_EMULATION_FAULTS:
-               if (!event->parent) {
-                       int err;
-
-                       err = swevent_hlist_get(event);
-                       if (err)
-                               return ERR_PTR(err);
+int perf_pmu_register(struct pmu *pmu)
+{
+       int cpu, ret;
+
+       mutex_lock(&pmus_lock);
+       ret = -ENOMEM;
+       pmu->pmu_disable_count = alloc_percpu(int);
+       if (!pmu->pmu_disable_count)
+               goto unlock;
 
-                       atomic_inc(&perf_swevent_enabled[event_id]);
-                       event->destroy = sw_perf_event_destroy;
+       pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
+       if (pmu->pmu_cpu_context)
+               goto got_cpu_context;
+
+       pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+       if (!pmu->pmu_cpu_context)
+               goto free_pdc;
+
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+               __perf_event_init_context(&cpuctx->ctx);
+               cpuctx->ctx.type = cpu_context;
+               cpuctx->ctx.pmu = pmu;
+               cpuctx->jiffies_interval = 1;
+               INIT_LIST_HEAD(&cpuctx->rotation_list);
+       }
+
+got_cpu_context:
+       if (!pmu->start_txn) {
+               if (pmu->pmu_enable) {
+                       /*
+                        * If we have pmu_enable/pmu_disable calls, install
+                        * transaction stubs that use that to try and batch
+                        * hardware accesses.
+                        */
+                       pmu->start_txn  = perf_pmu_start_txn;
+                       pmu->commit_txn = perf_pmu_commit_txn;
+                       pmu->cancel_txn = perf_pmu_cancel_txn;
+               } else {
+                       pmu->start_txn  = perf_pmu_nop_void;
+                       pmu->commit_txn = perf_pmu_nop_int;
+                       pmu->cancel_txn = perf_pmu_nop_void;
                }
-               pmu = &perf_ops_generic;
-               break;
        }
 
+       if (!pmu->pmu_enable) {
+               pmu->pmu_enable  = perf_pmu_nop_void;
+               pmu->pmu_disable = perf_pmu_nop_void;
+       }
+
+       list_add_rcu(&pmu->entry, &pmus);
+       ret = 0;
+unlock:
+       mutex_unlock(&pmus_lock);
+
+       return ret;
+
+free_pdc:
+       free_percpu(pmu->pmu_disable_count);
+       goto unlock;
+}
+
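
perf_pmu_register() hands each pmu its per-cpu context (shared between pmus with the same task_ctx_nr via find_pmu_context()) and fills in stub callbacks for anything left NULL. A hedged sketch of the smallest client this interface expects; every my_* name is illustrative, not from the patch:

    static struct pmu my_pmu = {
            .task_ctx_nr    = perf_hw_context,

            .event_init     = my_event_init,  /* must return -ENOENT for foreign events */
            .add            = my_add,         /* honours PERF_EF_START */
            .del            = my_del,
            .start          = my_start,
            .stop           = my_stop,
            .read           = my_read,
            /* start_txn/commit_txn/cancel_txn and pmu_enable/pmu_disable may
             * be left NULL; perf_pmu_register() installs the stubs above. */
    };

    static int __init my_pmu_module_init(void)
    {
            return perf_pmu_register(&my_pmu);  /* signature as introduced here */
    }
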
+void perf_pmu_unregister(struct pmu *pmu)
+{
+       mutex_lock(&pmus_lock);
+       list_del_rcu(&pmu->entry);
+       mutex_unlock(&pmus_lock);
+
+       /*
+        * We dereference the pmu list under both SRCU and regular RCU, so
+        * synchronize against both of those.
+        */
+       synchronize_srcu(&pmus_srcu);
+       synchronize_rcu();
+
+       free_percpu(pmu->pmu_disable_count);
+       free_pmu_context(pmu->pmu_cpu_context);
+}
+
+struct pmu *perf_init_event(struct perf_event *event)
+{
+       struct pmu *pmu = NULL;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               int ret = pmu->event_init(event);
+               if (!ret)
+                       goto unlock;
+
+               if (ret != -ENOENT) {
+                       pmu = ERR_PTR(ret);
+                       goto unlock;
+               }
+       }
+       pmu = ERR_PTR(-ENOENT);
+unlock:
+       srcu_read_unlock(&pmus_srcu, idx);
+
        return pmu;
 }
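
perf_init_event() turns event creation into a probe loop: -ENOENT from event_init means "not mine, try the next pmu", while any other error aborts the whole open. A skeleton event_init honouring that contract (my_pmu_type and my_setup are hypothetical):

    static int my_event_init(struct perf_event *event)
    {
            if (event->attr.type != my_pmu_type)
                    return -ENOENT;    /* not ours: perf_init_event() keeps probing */

            if (my_setup(event))
                    return -EINVAL;    /* ours, but unusable: abort the open */

            return 0;                  /* claimed */
    }
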
 
@@ -4810,20 +5245,18 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
  * Allocate and initialize an event structure
  */
 static struct perf_event *
-perf_event_alloc(struct perf_event_attr *attr,
-                  int cpu,
-                  struct perf_event_context *ctx,
-                  struct perf_event *group_leader,
-                  struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler,
-                  gfp_t gfpflags)
-{
-       const struct pmu *pmu;
+perf_event_alloc(struct perf_event_attr *attr, int cpu,
+                struct task_struct *task,
+                struct perf_event *group_leader,
+                struct perf_event *parent_event,
+                perf_overflow_handler_t overflow_handler)
+{
+       struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
 
-       event = kzalloc(sizeof(*event), gfpflags);
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
 
@@ -4841,6 +5274,7 @@ perf_event_alloc(struct perf_event_attr *attr,
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
        init_waitqueue_head(&event->waitq);
+       init_irq_work(&event->pending, perf_pending_event);
 
        mutex_init(&event->mmap_mutex);
 
@@ -4848,7 +5282,6 @@ perf_event_alloc(struct perf_event_attr *attr,
        event->attr             = *attr;
        event->group_leader     = group_leader;
        event->pmu              = NULL;
-       event->ctx              = ctx;
        event->oncpu            = -1;
 
        event->parent           = parent_event;
@@ -4858,6 +5291,17 @@ perf_event_alloc(struct perf_event_attr *attr,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (task) {
+               event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+               /*
+                * hw_breakpoint is a bit awkward here: the target task
+                * must be recorded at event allocation time.
+                */
+               if (attr->type == PERF_TYPE_BREAKPOINT)
+                       event->hw.bp_target = task;
+#endif
+       }
+
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
        
@@ -4882,29 +5326,8 @@ perf_event_alloc(struct perf_event_attr *attr,
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;
 
-       switch (attr->type) {
-       case PERF_TYPE_RAW:
-       case PERF_TYPE_HARDWARE:
-       case PERF_TYPE_HW_CACHE:
-               pmu = hw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_SOFTWARE:
-               pmu = sw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_TRACEPOINT:
-               pmu = tp_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_BREAKPOINT:
-               pmu = bp_perf_event_init(event);
-               break;
-
+       pmu = perf_init_event(event);
 
-       default:
-               break;
-       }
 done:
        err = 0;
        if (!pmu)
@@ -4922,13 +5345,21 @@ done:
        event->pmu = pmu;
 
        if (!event->parent) {
-               atomic_inc(&nr_events);
+               if (event->attach_state & PERF_ATTACH_TASK)
+                       jump_label_inc(&perf_task_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
 
        return event;
@@ -5076,12 +5507,16 @@ SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
-       struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+       struct perf_event *group_leader = NULL, *output_event = NULL;
+       struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
+       struct pmu *pmu;
        int event_fd;
+       int move_group = 0;
        int fput_needed = 0;
        int err;
 
@@ -5107,20 +5542,11 @@ SYSCALL_DEFINE5(perf_event_open,
        if (event_fd < 0)
                return event_fd;
 
-       /*
-        * Get the target context (task or percpu):
-        */
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_fd;
-       }
-
        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
-                       goto err_put_context;
+                       goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
@@ -5129,6 +5555,58 @@ SYSCALL_DEFINE5(perf_event_open,
                        group_leader = NULL;
        }
 
+       if (pid != -1) {
+               task = find_lively_task_by_vpid(pid);
+               if (IS_ERR(task)) {
+                       err = PTR_ERR(task);
+                       goto err_group_fd;
+               }
+       }
+
+       event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err_task;
+       }
+
+       /*
+        * Special case software events and allow them to be part of
+        * any hardware group.
+        */
+       pmu = event->pmu;
+
+       if (group_leader &&
+           (is_software_event(event) != is_software_event(group_leader))) {
+               if (is_software_event(event)) {
+                       /*
+                        * event and group_leader differ here: since event is
+                        * a software event, the group leader must be a
+                        * hardware event.
+                        *
+                        * Allow adding software events to !software groups;
+                        * this is safe because software events never fail
+                        * to schedule.
+                        */
+                       pmu = group_leader->pmu;
+               } else if (is_software_event(group_leader) &&
+                          (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+                       /*
+                        * In case the group is a pure software group, and we
+                        * try to add a hardware event, move the whole group to
+                        * the hardware context.
+                        */
+                       move_group = 1;
+               }
+       }
+
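
The block above implements the special case the comment describes: a software event may join a hardware group directly, and a pure-software group is migrated wholesale (move_group) when a hardware event joins it. From userspace this is ordinary group_fd usage; a hedged fragment reusing the earlier wrapper:

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);

    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_CPU_CYCLES;
    leader = perf_event_open(&attr, 0, -1, -1, 0);      /* hardware group leader */

    attr.type = PERF_TYPE_SOFTWARE;
    attr.config = PERF_COUNT_SW_PAGE_FAULTS;
    sibling = perf_event_open(&attr, 0, -1, leader, 0); /* software event joins
                                                           the hardware group */
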
+       /*
+        * Get the target context (task or percpu):
+        */
+       ctx = find_get_context(pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_alloc;
+       }
+
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -5140,42 +5618,66 @@ SYSCALL_DEFINE5(perf_event_open,
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
-                       goto err_put_context;
+                       goto err_context;
                /*
                 * Do not allow to attach to a group in a different
                 * task or CPU context:
                 */
-               if (group_leader->ctx != ctx)
-                       goto err_put_context;
+               if (move_group) {
+                       if (group_leader->ctx->type != ctx->type)
+                               goto err_context;
+               } else {
+                       if (group_leader->ctx != ctx)
+                               goto err_context;
+               }
+
                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
-                       goto err_put_context;
-       }
-
-       event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-                                    NULL, NULL, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
+                       goto err_context;
        }
 
        if (output_event) {
                err = perf_event_set_output(event, output_event);
                if (err)
-                       goto err_free_put_context;
+                       goto err_context;
        }
 
        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
-               goto err_free_put_context;
+               goto err_context;
+       }
+
+       if (move_group) {
+               struct perf_event_context *gctx = group_leader->ctx;
+
+               mutex_lock(&gctx->mutex);
+               perf_event_remove_from_context(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_event_remove_from_context(sibling);
+                       put_ctx(gctx);
+               }
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
        }
 
        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
+
+       if (move_group) {
+               perf_install_in_context(ctx, group_leader, cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+                       perf_install_in_context(ctx, sibling, cpu);
+                       get_ctx(ctx);
+               }
+       }
+
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
@@ -5196,11 +5698,15 @@ SYSCALL_DEFINE5(perf_event_open,
        fd_install(event_fd, event_file);
        return event_fd;
 
-err_free_put_context:
+err_context:
+       put_ctx(ctx);
+err_alloc:
        free_event(event);
-err_put_context:
+err_task:
+       if (task)
+               put_task_struct(task);
+err_group_fd:
        fput_light(group_file, fput_needed);
-       put_ctx(ctx);
 err_fd:
        put_unused_fd(event_fd);
        return err;
@@ -5211,154 +5717,54 @@ err_fd:
  *
  * @attr: attributes of the counter to create
  * @cpu: cpu in which the counter is bound
- * @pid: task to profile
+ * @task: task to profile (NULL for percpu)
  */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
 {
-       struct perf_event *event;
        struct perf_event_context *ctx;
-       int err;
-
-       /*
-        * Get the target context (task or percpu):
-        */
-
-       ctx = find_get_context(pid, cpu);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err_exit;
-       }
-
-       event = perf_event_alloc(attr, cpu, ctx, NULL,
-                                NULL, overflow_handler, GFP_KERNEL);
-       if (IS_ERR(event)) {
-               err = PTR_ERR(event);
-               goto err_put_context;
-       }
-
-       event->filp = NULL;
-       WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
-       perf_install_in_context(ctx, event, cpu);
-       ++ctx->generation;
-       mutex_unlock(&ctx->mutex);
-
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
-       return event;
-
- err_put_context:
-       put_ctx(ctx);
- err_exit:
-       return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
-
-/*
- * inherit a event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event *group_leader,
-             struct perf_event_context *child_ctx)
-{
-       struct perf_event *child_event;
-
-       /*
-        * Instead of creating recursive hierarchies of events,
-        * we link inherited events back to the original parent,
-        * which has a filp for sure, which we use as the reference
-        * count:
-        */
-       if (parent_event->parent)
-               parent_event = parent_event->parent;
-
-       child_event = perf_event_alloc(&parent_event->attr,
-                                          parent_event->cpu, child_ctx,
-                                          group_leader, parent_event,
-                                          NULL, GFP_KERNEL);
-       if (IS_ERR(child_event))
-               return child_event;
-       get_ctx(child_ctx);
-
-       /*
-        * Make the child state follow the state of the parent event,
-        * not its attr.disabled bit.  We hold the parent's mutex,
-        * so we won't race with perf_event_{en, dis}able_family.
-        */
-       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-               child_event->state = PERF_EVENT_STATE_INACTIVE;
-       else
-               child_event->state = PERF_EVENT_STATE_OFF;
-
-       if (parent_event->attr.freq) {
-               u64 sample_period = parent_event->hw.sample_period;
-               struct hw_perf_event *hwc = &child_event->hw;
-
-               hwc->sample_period = sample_period;
-               hwc->last_period   = sample_period;
-
-               local64_set(&hwc->period_left, sample_period);
-       }
-
-       child_event->overflow_handler = parent_event->overflow_handler;
-
-       /*
-        * Link it up in the child's context:
-        */
-       add_event_to_ctx(child_event, child_ctx);
-
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-
-       /*
-        * Link this into the parent event's child list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_add_tail(&child_event->child_list, &parent_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
+       struct perf_event *event;
+       int err;
 
-       return child_event;
-}
+       /*
+        * Get the target context (task or percpu):
+        */
 
-static int inherit_group(struct perf_event *parent_event,
-             struct task_struct *parent,
-             struct perf_event_context *parent_ctx,
-             struct task_struct *child,
-             struct perf_event_context *child_ctx)
-{
-       struct perf_event *leader;
-       struct perf_event *sub;
-       struct perf_event *child_ctr;
+       event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+               goto err;
+       }
 
-       leader = inherit_event(parent_event, parent, parent_ctx,
-                                child, NULL, child_ctx);
-       if (IS_ERR(leader))
-               return PTR_ERR(leader);
-       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-               child_ctr = inherit_event(sub, parent, parent_ctx,
-                                           child, leader, child_ctx);
-               if (IS_ERR(child_ctr))
-                       return PTR_ERR(child_ctr);
+       ctx = find_get_context(event->pmu, task, cpu);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err_free;
        }
-       return 0;
+
+       event->filp = NULL;
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
+       perf_install_in_context(ctx, event, cpu);
+       ++ctx->generation;
+       mutex_unlock(&ctx->mutex);
+
+       event->owner = current;
+       get_task_struct(current);
+       mutex_lock(&current->perf_event_mutex);
+       list_add_tail(&event->owner_entry, &current->perf_event_list);
+       mutex_unlock(&current->perf_event_mutex);
+
+       return event;
+
+err_free:
+       free_event(event);
+err:
+       return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
 static void sync_child_event(struct perf_event *child_event,
                               struct task_struct *child)
@@ -5416,16 +5822,13 @@ __perf_event_exit_task(struct perf_event *child_event,
        }
 }
 
-/*
- * When a child task exits, feed back event values to parent events.
- */
-void perf_event_exit_task(struct task_struct *child)
+static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *tmp;
        struct perf_event_context *child_ctx;
        unsigned long flags;
 
-       if (likely(!child->perf_event_ctxp)) {
+       if (likely(!child->perf_event_ctxp[ctxn])) {
                perf_event_task(child, NULL, 0);
                return;
        }
@@ -5437,8 +5840,8 @@ void perf_event_exit_task(struct task_struct *child)
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
-       child_ctx = child->perf_event_ctxp;
-       __perf_event_task_sched_out(child_ctx);
+       child_ctx = child->perf_event_ctxp[ctxn];
+       task_ctx_sched_out(child_ctx, EVENT_ALL);
 
        /*
         * Take the context lock here so that if find_get_context is
@@ -5446,7 +5849,7 @@ void perf_event_exit_task(struct task_struct *child)
         * incremented the context's refcount before we do put_ctx below.
         */
        raw_spin_lock(&child_ctx->lock);
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -5499,6 +5902,17 @@ again:
        put_ctx(child_ctx);
 }
 
+/*
+ * When a child task exits, feed back event values to parent events.
+ */
+void perf_event_exit_task(struct task_struct *child)
+{
+       int ctxn;
+
+       for_each_task_context_nr(ctxn)
+               perf_event_exit_task_context(child, ctxn);
+}
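
Exit handling now loops over every per-task context class instead of assuming a single perf_event_ctxp pointer. For reference, a sketch of the enum and iterator this relies on, as added on the header side of this series (quoted from memory, so treat it as a sketch rather than the exact text):

    /* include/linux/perf_event.h (sketch) */
    enum perf_event_task_context {
            perf_invalid_context = -1,
            perf_hw_context = 0,
            perf_sw_context,
            perf_nr_task_contexts,
    };

    /* kernel/perf_event.c (sketch) */
    #define for_each_task_context_nr(ctxn)                          \
            for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
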
+
 static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
 {
@@ -5520,48 +5934,166 @@ static void perf_free_event(struct perf_event *event,
 
 /*
  * free an unexposed, unused context as created by inheritance by
- * init_task below, used by fork() in case of fail.
+ * perf_event_init_task below, used by fork() in case of fail.
  */
 void perf_event_free_task(struct task_struct *task)
 {
-       struct perf_event_context *ctx = task->perf_event_ctxp;
+       struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
+       int ctxn;
 
-       if (!ctx)
-               return;
+       for_each_task_context_nr(ctxn) {
+               ctx = task->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
 
-       mutex_lock(&ctx->mutex);
+               mutex_lock(&ctx->mutex);
 again:
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
 
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
-                                group_entry)
-               perf_free_event(event, ctx);
+               list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+                               group_entry)
+                       perf_free_event(event, ctx);
 
-       if (!list_empty(&ctx->pinned_groups) ||
-           !list_empty(&ctx->flexible_groups))
-               goto again;
+               if (!list_empty(&ctx->pinned_groups) ||
+                               !list_empty(&ctx->flexible_groups))
+                       goto again;
 
-       mutex_unlock(&ctx->mutex);
+               mutex_unlock(&ctx->mutex);
 
-       put_ctx(ctx);
+               put_ctx(ctx);
+       }
+}
+
+void perf_event_delayed_put(struct task_struct *task)
+{
+       int ctxn;
+
+       for_each_task_context_nr(ctxn)
+               WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+}
+
+/*
+ * inherit a event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event *group_leader,
+             struct perf_event_context *child_ctx)
+{
+       struct perf_event *child_event;
+       unsigned long flags;
+
+       /*
+        * Instead of creating recursive hierarchies of events,
+        * we link inherited events back to the original parent,
+        * which has a filp for sure, which we use as the reference
+        * count:
+        */
+       if (parent_event->parent)
+               parent_event = parent_event->parent;
+
+       child_event = perf_event_alloc(&parent_event->attr,
+                                          parent_event->cpu,
+                                          child,
+                                          group_leader, parent_event,
+                                          NULL);
+       if (IS_ERR(child_event))
+               return child_event;
+       get_ctx(child_ctx);
+
+       /*
+        * Make the child state follow the state of the parent event,
+        * not its attr.disabled bit.  We hold the parent's mutex,
+        * so we won't race with perf_event_{en, dis}able_family.
+        */
+       if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+               child_event->state = PERF_EVENT_STATE_INACTIVE;
+       else
+               child_event->state = PERF_EVENT_STATE_OFF;
+
+       if (parent_event->attr.freq) {
+               u64 sample_period = parent_event->hw.sample_period;
+               struct hw_perf_event *hwc = &child_event->hw;
+
+               hwc->sample_period = sample_period;
+               hwc->last_period   = sample_period;
+
+               local64_set(&hwc->period_left, sample_period);
+       }
+
+       child_event->ctx = child_ctx;
+       child_event->overflow_handler = parent_event->overflow_handler;
+
+       /*
+        * Link it up in the child's context:
+        */
+       raw_spin_lock_irqsave(&child_ctx->lock, flags);
+       add_event_to_ctx(child_event, child_ctx);
+       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+       /*
+        * Get a reference to the parent filp - we will fput it
+        * when the child event exits. This is safe to do because
+        * we are in the parent and we know that the filp still
+        * exists and has a nonzero count:
+        */
+       atomic_long_inc(&parent_event->filp->f_count);
+
+       /*
+        * Link this into the parent event's child list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_add_tail(&child_event->child_list, &parent_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+
+       return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+             struct task_struct *parent,
+             struct perf_event_context *parent_ctx,
+             struct task_struct *child,
+             struct perf_event_context *child_ctx)
+{
+       struct perf_event *leader;
+       struct perf_event *sub;
+       struct perf_event *child_ctr;
+
+       leader = inherit_event(parent_event, parent, parent_ctx,
+                                child, NULL, child_ctx);
+       if (IS_ERR(leader))
+               return PTR_ERR(leader);
+       list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+               child_ctr = inherit_event(sub, parent, parent_ctx,
+                                           child, leader, child_ctx);
+               if (IS_ERR(child_ctr))
+                       return PTR_ERR(child_ctr);
+       }
+       return 0;
 }
 
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
-                  struct task_struct *child,
+                  struct task_struct *child, int ctxn,
                   int *inherited_all)
 {
        int ret;
-       struct perf_event_context *child_ctx = child->perf_event_ctxp;
+       struct perf_event_context *child_ctx;
 
        if (!event->attr.inherit) {
                *inherited_all = 0;
                return 0;
        }
 
+       child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
@@ -5570,14 +6102,11 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                 * child.
                 */
 
-               child_ctx = kzalloc(sizeof(struct perf_event_context),
-                                   GFP_KERNEL);
+               child_ctx = alloc_perf_context(event->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;
 
-               __perf_event_init_context(child_ctx, child);
-               child->perf_event_ctxp = child_ctx;
-               get_task_struct(child);
+               child->perf_event_ctxp[ctxn] = child_ctx;
        }
 
        ret = inherit_group(event, parent, parent_ctx,
@@ -5589,11 +6118,10 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
        return ret;
 }
 
-
 /*
  * Initialize the perf_event context in task_struct
  */
-int perf_event_init_task(struct task_struct *child)
+int perf_event_init_context(struct task_struct *child, int ctxn)
 {
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
@@ -5602,19 +6130,19 @@ int perf_event_init_task(struct task_struct *child)
        int inherited_all = 1;
        int ret = 0;
 
-       child->perf_event_ctxp = NULL;
+       child->perf_event_ctxp[ctxn] = NULL;
 
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);
 
-       if (likely(!parent->perf_event_ctxp))
+       if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;
 
        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
-       parent_ctx = perf_pin_task_context(parent);
+       parent_ctx = perf_pin_task_context(parent, ctxn);
 
        /*
         * No need to check if parent_ctx != NULL here; since we saw
@@ -5634,20 +6162,20 @@ int perf_event_init_task(struct task_struct *child)
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
 
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
-               ret = inherit_task_group(event, parent, parent_ctx, child,
-                                        &inherited_all);
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
                if (ret)
                        break;
        }
 
-       child_ctx = child->perf_event_ctxp;
+       child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
                /*
@@ -5676,63 +6204,98 @@ int perf_event_init_task(struct task_struct *child)
        return ret;
 }
 
+/*
+ * Initialize the perf_event context in task_struct
+ */
+int perf_event_init_task(struct task_struct *child)
+{
+       int ctxn, ret;
+
+       for_each_task_context_nr(ctxn) {
+               ret = perf_event_init_context(child, ctxn);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static void __init perf_event_init_all_cpus(void)
 {
+       struct swevent_htable *swhash;
        int cpu;
-       struct perf_cpu_context *cpuctx;
 
        for_each_possible_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               mutex_init(&cpuctx->hlist_mutex);
-               __perf_event_init_context(&cpuctx->ctx, NULL);
+               swhash = &per_cpu(swevent_htable, cpu);
+               mutex_init(&swhash->hlist_mutex);
+               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
 }
 
 static void __cpuinit perf_event_init_cpu(int cpu)
 {
-       struct perf_cpu_context *cpuctx;
-
-       cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       spin_lock(&perf_resource_lock);
-       cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-       spin_unlock(&perf_resource_lock);
-
-       mutex_lock(&cpuctx->hlist_mutex);
-       if (cpuctx->hlist_refcount > 0) {
+       mutex_lock(&swhash->hlist_mutex);
+       if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
-               hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
-               WARN_ON_ONCE(!hlist);
-               rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+               hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
+               WARN_ON(!hlist);
+               rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_unlock(&swhash->hlist_mutex);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __perf_event_exit_cpu(void *info)
+static void perf_pmu_rotate_stop(struct pmu *pmu)
 {
-       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+       WARN_ON(!irqs_disabled());
+
+       list_del_init(&cpuctx->rotation_list);
+}
+
+static void __perf_event_exit_context(void *__info)
+{
+       struct perf_event_context *ctx = __info;
        struct perf_event *event, *tmp;
 
+       perf_pmu_rotate_stop(ctx->pmu);
+
        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_event_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_event_remove_from_context(event);
 }
+
+static void perf_event_exit_cpu_context(int cpu)
+{
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+
+               mutex_lock(&ctx->mutex);
+               smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+               mutex_unlock(&ctx->mutex);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+}
+
 static void perf_event_exit_cpu(int cpu)
 {
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
-       mutex_lock(&cpuctx->hlist_mutex);
-       swevent_hlist_release(cpuctx);
-       mutex_unlock(&cpuctx->hlist_mutex);
+       mutex_lock(&swhash->hlist_mutex);
+       swevent_hlist_release(swhash);
+       mutex_unlock(&swhash->hlist_mutex);
 
-       mutex_lock(&ctx->mutex);
-       smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
-       mutex_unlock(&ctx->mutex);
+       perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
@@ -5743,15 +6306,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
                perf_event_init_cpu(cpu);
                break;
 
+       case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
                break;
 
@@ -5762,118 +6325,13 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
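
Masking out CPU_TASKS_FROZEN folds the suspend/resume notifier variants onto the normal handlers, which is why the *_FROZEN cases disappear from the switch. The identity it relies on, shown as a fragment:

    /* Each frozen variant is the plain action with CPU_TASKS_FROZEN or'ed in:
     *   CPU_UP_PREPARE_FROZEN == (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
     * so the mask maps suspend-time callbacks onto the normal cases: */
    switch (CPU_UP_PREPARE_FROZEN & ~CPU_TASKS_FROZEN) {
    case CPU_UP_PREPARE:    /* taken */
            break;
    }
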
 
-/*
- * This has to have a higher priority than migration_notifier in sched.c.
- */
-static struct notifier_block __cpuinitdata perf_cpu_nb = {
-       .notifier_call          = perf_cpu_notify,
-       .priority               = 20,
-};
-
 void __init perf_event_init(void)
 {
        perf_event_init_all_cpus();
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
-                       (void *)(long)smp_processor_id());
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
-                       (void *)(long)smp_processor_id());
-       register_cpu_notifier(&perf_cpu_nb);
-}
-
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-                                       struct sysdev_class_attribute *attr,
-                                       char *buf)
-{
-       return sprintf(buf, "%d\n", perf_reserved_percpu);
-}
-
-static ssize_t
-perf_set_reserve_percpu(struct sysdev_class *class,
-                       struct sysdev_class_attribute *attr,
-                       const char *buf,
-                       size_t count)
-{
-       struct perf_cpu_context *cpuctx;
-       unsigned long val;
-       int err, cpu, mpt;
-
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > perf_max_events)
-               return -EINVAL;
-
-       spin_lock(&perf_resource_lock);
-       perf_reserved_percpu = val;
-       for_each_online_cpu(cpu) {
-               cpuctx = &per_cpu(perf_cpu_context, cpu);
-               raw_spin_lock_irq(&cpuctx->ctx.lock);
-               mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-                         perf_max_events - perf_reserved_percpu);
-               cpuctx->max_pertask = mpt;
-               raw_spin_unlock_irq(&cpuctx->ctx.lock);
-       }
-       spin_unlock(&perf_resource_lock);
-
-       return count;
-}
-
-static ssize_t perf_show_overcommit(struct sysdev_class *class,
-                                   struct sysdev_class_attribute *attr,
-                                   char *buf)
-{
-       return sprintf(buf, "%d\n", perf_overcommit);
-}
-
-static ssize_t
-perf_set_overcommit(struct sysdev_class *class,
-                   struct sysdev_class_attribute *attr,
-                   const char *buf, size_t count)
-{
-       unsigned long val;
-       int err;
-
-       err = strict_strtoul(buf, 10, &val);
-       if (err)
-               return err;
-       if (val > 1)
-               return -EINVAL;
-
-       spin_lock(&perf_resource_lock);
-       perf_overcommit = val;
-       spin_unlock(&perf_resource_lock);
-
-       return count;
-}
-
-static SYSDEV_CLASS_ATTR(
-                               reserve_percpu,
-                               0644,
-                               perf_show_reserve_percpu,
-                               perf_set_reserve_percpu
-                       );
-
-static SYSDEV_CLASS_ATTR(
-                               overcommit,
-                               0644,
-                               perf_show_overcommit,
-                               perf_set_overcommit
-                       );
-
-static struct attribute *perfclass_attrs[] = {
-       &attr_reserve_percpu.attr,
-       &attr_overcommit.attr,
-       NULL
-};
-
-static struct attribute_group perfclass_attr_group = {
-       .attrs                  = perfclass_attrs,
-       .name                   = "perf_events",
-};
-
-static int __init perf_event_sysfs_init(void)
-{
-       return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-                                 &perfclass_attr_group);
+       init_srcu_struct(&pmus_srcu);
+       perf_pmu_register(&perf_swevent);
+       perf_pmu_register(&perf_cpu_clock);
+       perf_pmu_register(&perf_task_clock);
+       perf_tp_register();
+       perf_cpu_notifier(perf_cpu_notify);
 }
-device_initcall(perf_event_sysfs_init);
index d55c6fb8d087a24a2d462886dfe1fc53bf9deced..39b65b69584f5b0f373e360d6ecc3118751840a9 100644 (file)
@@ -401,7 +401,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
-               first = rcu_dereference_check(pid->tasks[type].first,
+               first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              rcu_read_lock_held() ||
                                              lockdep_tasklist_lock_is_held());
                if (first)
@@ -416,6 +416,7 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
+       rcu_lockdep_assert(rcu_read_lock_held());
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
 }
 
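A hedged sketch of the caller contract enforced by the new rcu_lockdep_assert() above: lookups must run inside an RCU read-side critical section, and any returned task must be pinned before the section ends. lookup_example() is an illustrative name, not kernel API:

    struct task_struct *lookup_example(pid_t nr, struct pid_namespace *ns)
    {
            struct task_struct *t;

            rcu_read_lock();                /* satisfies the new assertion */
            t = find_task_by_pid_ns(nr, ns);
            if (t)
                    get_task_struct(t);     /* pin before leaving the RCU section */
            rcu_read_unlock();
            return t;
    }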
index b7e4c362361bcf46fe34992e9bba1852dd478b71..645e541a45f6c9a9667c054bd7f5230ecaef67a3 100644 (file)
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
        } else if (count == 11) { /* len('0x12345678/0') */
                if (copy_from_user(ascii_value, buf, 11))
                        return -EFAULT;
+               if (strlen(ascii_value) != 10)
+                       return -EINVAL;
                x = sscanf(ascii_value, "%x", &value);
                if (x != 1)
                        return -EINVAL;
-               pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+               pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
        } else
                return -EINVAL;
 
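Why the added strlen() guard matters: sscanf("%x") happily parses a shorter prefix, so an 11-byte write containing an early NUL (say "0x12\0...") would previously have been accepted. A hedged sketch of the validation pulled out as a helper — parse_pm_qos_hex() is illustrative, not kernel API:

    static int parse_pm_qos_hex(const char *ascii_value, s32 *value)
    {
            unsigned int v;

            if (strlen(ascii_value) != 10)  /* exactly "0x12345678" */
                    return -EINVAL;
            if (sscanf(ascii_value, "%x", &v) != 1)
                    return -EINVAL;
            *value = v;
            return 0;
    }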
index c77963938bca440a90423952d6d85cf4d66abd83..8dc31e02ae129e8f042804b67c38ab02f997d94c 100644 (file)
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
index 5e7edfb05e66cff0d2c99d5fc8fddfde03e372c3..d3f795f01bbce83741fb717e1ee1a05e1377c561 100644 (file)
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
        return nr_alloc;
 }
 
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+                                             unsigned long avail_normal)
 {
-       return preallocate_image_pages(nr_pages, GFP_IMAGE);
+       unsigned long alloc;
+
+       if (avail_normal <= alloc_normal)
+               return 0;
+
+       alloc = avail_normal - alloc_normal;
+       if (nr_pages < alloc)
+               alloc = nr_pages;
+
+       return preallocate_image_pages(alloc, GFP_IMAGE);
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
  */
 static void free_unnecessary_pages(void)
 {
-       unsigned long save_highmem, to_free_normal, to_free_highmem;
+       unsigned long save, to_free_normal, to_free_highmem;
 
-       to_free_normal = alloc_normal - count_data_pages();
-       save_highmem = count_highmem_pages();
-       if (alloc_highmem > save_highmem) {
-               to_free_highmem = alloc_highmem - save_highmem;
+       save = count_data_pages();
+       if (alloc_normal >= save) {
+               to_free_normal = alloc_normal - save;
+               save = 0;
+       } else {
+               to_free_normal = 0;
+               save -= alloc_normal;
+       }
+       save += count_highmem_pages();
+       if (alloc_highmem >= save) {
+               to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
-               to_free_normal -= save_highmem - alloc_highmem;
+               to_free_normal -= save - alloc_highmem;
        }
 
        memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
 {
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
-       unsigned long alloc, save_highmem, pages_highmem;
+       unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        struct timeval start, stop;
        int error;
 
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
+       avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;
 
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
-               pages += preallocate_image_memory(saveable - pages);
+               pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }
 
        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
+       /*
+        * To avoid excessive pressure on the normal zone, leave room in it to
+        * accommodate an image of the minimum size (unless it's already too
+        * small, in which case don't preallocate pages from it at all).
+        */
+       if (avail_normal > pages)
+               avail_normal -= pages;
+       else
+               avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);
 
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = (count - max_size) - pages_highmem;
-       pages = preallocate_image_memory(alloc);
-       if (pages < alloc)
-               goto err_out;
-       size = max_size - size;
-       alloc = size;
-       size = preallocate_highmem_fraction(size, highmem, count);
-       pages_highmem += size;
-       alloc -= size;
-       pages += preallocate_image_memory(alloc);
-       pages += pages_highmem;
+       pages = preallocate_image_memory(alloc, avail_normal);
+       if (pages < alloc) {
+               /* We have exhausted non-highmem pages, try highmem. */
+               alloc -= pages;
+               pages += pages_highmem;
+               pages_highmem = preallocate_image_highmem(alloc);
+               if (pages_highmem < alloc)
+                       goto err_out;
+               pages += pages_highmem;
+               /*
+                * size is the desired number of saveable pages to leave in
+                * memory, so try to preallocate (all memory - size) pages.
+                */
+               alloc = (count - pages) - size;
+               pages += preallocate_image_highmem(alloc);
+       } else {
+               /*
+                * There are approximately max_size saveable pages at this point
+                * and we want to reduce this number down to size.
+                */
+               alloc = max_size - size;
+               size = preallocate_highmem_fraction(alloc, highmem, count);
+               pages_highmem += size;
+               alloc -= size;
+               size = preallocate_image_memory(alloc, avail_normal);
+               pages_highmem += preallocate_image_highmem(alloc - size);
+               pages += pages_highmem + size;
+       }
 
        /*
         * We only need as many page frames for the image as there are saveable
index 5d0059eed3e4e3ce0bc38ad072bd7b1430d9f712..e6a5bdf61a375c309c1f9ea356e9123d79699037 100644 (file)
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
index 8fe465ac008aebdcdbb0e9db9fcd644d5b12c231..2531017795f63e7d9c60d1db976e2b227c98ffda 100644 (file)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(oops_in_progress);
  * provides serialisation for access to the entire console
  * driver system.
  */
-static DECLARE_MUTEX(console_sem);
+static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
@@ -556,7 +556,7 @@ static void zap_locks(void)
        /* If a crash is occurring, make sure we can't deadlock */
        spin_lock_init(&logbuf_lock);
        /* And make sure that we print immediately */
-       init_MUTEX(&console_sem);
+       sema_init(&console_sem, 1);
 }
 
 #if defined(CONFIG_PRINTK_TIME)
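A hedged sketch of the API migration applied above: the deprecated DECLARE_MUTEX()/init_MUTEX() helpers defined a semaphore with an initial count of one, which the new spellings make explicit. example_sem is an illustrative name:

    static DEFINE_SEMAPHORE(example_sem);   /* was: DECLARE_MUTEX(example_sem) */

    static void example_reinit(void)
    {
            sema_init(&example_sem, 1);     /* was: init_MUTEX(&example_sem) */
    }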
index 4d169835fb362dcd6eb52a916a0d8e85e48c8fb8..a23a57a976d1a46f69cb2c32ecc38dd78e5e8b08 100644 (file)
@@ -73,12 +73,14 @@ int debug_lockdep_rcu_enabled(void)
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
 
 /**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
  *
  * Check for bottom half being disabled, which covers both the
  * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
  * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
- * will show the situation.
+ * will show the situation.  This is useful for debug checks in functions
+ * that require that they be called within an RCU read-side critical
+ * section.
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
  */
@@ -86,7 +88,7 @@ int rcu_read_lock_bh_held(void)
 {
        if (!debug_lockdep_rcu_enabled())
                return 1;
-       return in_softirq();
+       return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 
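A hedged sketch of the debug-check use case called out in the expanded kernel-doc above; example_bh_side_check() is an illustrative name:

    static void example_bh_side_check(void)
    {
            /*
             * Holds inside rcu_read_lock_bh() sections and, with the
             * change above, also whenever hard irqs are disabled.
             */
            WARN_ON_ONCE(!rcu_read_lock_bh_held());
    }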
index 196ec02f8be0c1ad884b7d69599005a6fc3a4429..d806735342acb10bc3e3ae787e62ade34f1d5955 100644 (file)
@@ -59,6 +59,14 @@ int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/* Forward declarations for rcutiny_plugin.h. */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void __call_rcu(struct rcu_head *head,
+                      void (*func)(struct rcu_head *rcu),
+                      struct rcu_ctrlblk *rcp);
+
+#include "rcutiny_plugin.h"
+
 #ifdef CONFIG_NO_HZ
 
 static long rcu_dynticks_nesting = 1;
@@ -140,6 +148,7 @@ void rcu_check_callbacks(int cpu, int user)
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
+       rcu_preempt_check_callbacks();
 }
 
 /*
@@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
+       rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);
 
@@ -182,6 +192,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 {
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
+       rcu_preempt_process_callbacks();
 }
 
 /*
@@ -223,15 +234,15 @@ static void __call_rcu(struct rcu_head *head,
 }
 
 /*
- * Post an RCU callback to be invoked after the end of an RCU grace
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
  * period.  But since we have but one CPU, that would be after any
  * quiescent state.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
        __call_rcu(head, func, &rcu_sched_ctrlblk);
 }
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
  * Post an RCU bottom-half callback to be invoked after any subsequent
@@ -243,20 +254,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
-void rcu_barrier(void)
-{
-       struct rcu_synchronize rcu;
-
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       call_rcu(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
 void rcu_barrier_bh(void)
 {
        struct rcu_synchronize rcu;
@@ -289,5 +286,3 @@ void __init rcu_init(void)
 {
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
-
-#include "rcutiny_plugin.h"
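A hedged sketch, not from the patch, of posting a callback through the renamed call_rcu_sched() interface exported above; struct foo and the foo_* names are illustrative assumptions:

    struct foo {
            struct rcu_head rcu;
            int data;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
            /* Runs after an RCU-sched grace period has elapsed. */
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_retire(struct foo *fp)
    {
            call_rcu_sched(&fp->rcu, foo_reclaim);
    }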
index d223a92bc7427ffd098f8c49d524974eb12902fe..6ceca4f745ffa1f4535c69467ea59704e2ddbe97 100644 (file)
@@ -1,7 +1,7 @@
 /*
- * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
  * Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright IBM Corporation, 2009
+ * Copyright (c) 2010 Linaro
  *
  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
+#ifdef CONFIG_TINY_PREEMPT_RCU
+
+#include <linux/delay.h>
+
+/* Global control variables for preemptible RCU. */
+struct rcu_preempt_ctrlblk {
+       struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */
+       struct rcu_head **nexttail;
+                               /* Tasks blocked in a preemptible RCU */
+                               /*  read-side critical section while a */
+                               /*  preemptible-RCU grace period is in */
+                               /*  progress must wait for a later grace */
+                               /*  period.  This pointer points to the */
+                               /*  ->next pointer of the last task that */
+                               /*  must wait for a later grace period, or */
+                               /*  to &->rcb.rcucblist if there is no */
+                               /*  such task. */
+       struct list_head blkd_tasks;
+                               /* Tasks blocked in RCU read-side critical */
+                               /*  section.  Tasks are placed at the head */
+                               /*  of this list and age towards the tail. */
+       struct list_head *gp_tasks;
+                               /* Pointer to the first task blocking the */
+                               /*  current grace period, or NULL if there */
+                               /*  is no such task. */
+       struct list_head *exp_tasks;
+                               /* Pointer to first task blocking the */
+                               /*  current expedited grace period, or NULL */
+                               /*  if there is no such task.  If there */
+                               /*  is no current expedited grace period, */
+                               /*  then there cannot be any such task. */
+       u8 gpnum;               /* Current grace period. */
+       u8 gpcpu;               /* Last grace period blocked by the CPU. */
+       u8 completed;           /* Last grace period completed. */
+                               /*  If all three are equal, RCU is idle. */
+};
+
+static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
+       .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+       .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+       .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+       .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
+};
+
+static int rcu_preempted_readers_exp(void);
+static void rcu_report_exp_done(void);
+
+/*
+ * Return true if the CPU has not yet responded to the current grace period.
+ */
+static int rcu_cpu_blocking_cur_gp(void)
+{
+       return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Check for a running RCU reader.  Because there is only one CPU,
+ * there can be but one running RCU reader at a time.  ;-)
+ */
+static int rcu_preempt_running_reader(void)
+{
+       return current->rcu_read_lock_nesting;
+}
+
+/*
+ * Check for preempted RCU readers blocking any grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_any(void)
+{
+       return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
+}
+
+/*
+ * Check for preempted RCU readers blocking the current grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_cgp(void)
+{
+       return rcu_preempt_ctrlblk.gp_tasks != NULL;
+}
+
+/*
+ * Return true if another preemptible-RCU grace period is needed.
+ */
+static int rcu_preempt_needs_another_gp(void)
+{
+       return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
+}
+
+/*
+ * Return true if a preemptible-RCU grace period is in progress.
+ * The caller must disable hardirqs.
+ */
+static int rcu_preempt_gp_in_progress(void)
+{
+       return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Record a preemptible-RCU quiescent state for the specified CPU.  Note
+ * that this just means that the task currently running on the CPU is
+ * in a quiescent state.  There might be any number of tasks blocked
+ * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
+ *
+ * Because this is a single-CPU implementation, the only way a grace
+ * period can end is if the CPU is in a quiescent state.  The reason is
+ * that a blocked preemptible-RCU reader can exit its critical section
+ * only if the CPU is running it at the time.  Therefore, when the
+ * last task blocking the current grace period exits its RCU read-side
+ * critical section, neither the CPU nor blocked tasks will be stopping
+ * the current grace period.  (In contrast, SMP implementations
+ * might have CPUs running in RCU read-side critical sections that
+ * block later grace periods -- but this is not possible given only
+ * one CPU.)
+ */
+static void rcu_preempt_cpu_qs(void)
+{
+       /* Record both CPU and task as having responded to current GP. */
+       rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
+       current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+
+       /*
+        * If there is no GP, or if blocked readers are still blocking GP,
+        * then there is nothing more to do.
+        */
+       if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+               return;
+
+       /* Advance callbacks. */
+       rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
+       rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
+       rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;
+
+       /* If there are no blocked readers, next GP is done instantly. */
+       if (!rcu_preempt_blocked_readers_any())
+               rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
+
+       /* If there are done callbacks, make RCU_SOFTIRQ process them. */
+       if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
+               raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
+ */
+static void rcu_preempt_start_gp(void)
+{
+       if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {
+
+               /* Official start of GP. */
+               rcu_preempt_ctrlblk.gpnum++;
+
+               /* Any blocked RCU readers block new GP. */
+               if (rcu_preempt_blocked_readers_any())
+                       rcu_preempt_ctrlblk.gp_tasks =
+                               rcu_preempt_ctrlblk.blkd_tasks.next;
+
+               /* If there is no running reader, CPU is done with GP. */
+               if (!rcu_preempt_running_reader())
+                       rcu_preempt_cpu_qs();
+       }
+}
+
+/*
+ * We have entered the scheduler, and the current task might soon be
+ * context-switched away from.  If this task is in an RCU read-side
+ * critical section, we will no longer be able to rely on the CPU to
+ * record that fact, so we enqueue the task on the blkd_tasks list.
+ * If the task started after the current grace period began, as recorded
+ * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise we
+ * enqueue before the element referenced by ->gp_tasks (or at the tail
+ * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
+ * The task will dequeue itself when it exits the outermost enclosing
+ * RCU read-side critical section.  Therefore, the current grace period
+ * cannot be permitted to complete until the ->gp_tasks pointer becomes
+ * NULL.
+ *
+ * Caller must disable preemption.
+ */
+void rcu_preempt_note_context_switch(void)
+{
+       struct task_struct *t = current;
+       unsigned long flags;
+
+       local_irq_save(flags); /* must exclude scheduler_tick(). */
+       if (rcu_preempt_running_reader() &&
+           (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+
+               /* Possibly blocking in an RCU read-side critical section. */
+               t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+
+               /*
+                * If this CPU has already checked in, then this task
+                * will hold up the next grace period rather than the
+                * current grace period.  Queue the task accordingly.
+                * If the task is queued for the current grace period
+                * (i.e., this CPU has not yet passed through a quiescent
+                * state for the current grace period), then as long
+                * as that task remains queued, the current grace period
+                * cannot end.
+                */
+               list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
+               if (rcu_cpu_blocking_cur_gp())
+                       rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+       }
+
+       /*
+        * Either we were not in an RCU read-side critical section to
+        * begin with, or we have now recorded that critical section
+        * globally.  Either way, we can now note a quiescent state
+        * for this CPU.  Again, if we were in an RCU read-side critical
+        * section, and if that critical section was blocking the current
+        * grace period, then the fact that the task has been enqueued
+        * means that current grace period continues to be blocked.
+        * means that the current grace period continues to be blocked.
+       rcu_preempt_cpu_qs();
+       local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+       current->rcu_read_lock_nesting++;
+       barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or task having blocked during the RCU
+ * read-side critical section.
+ */
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+       int empty;
+       int empty_exp;
+       unsigned long flags;
+       struct list_head *np;
+       int special;
+
+       /*
+        * NMI handlers cannot block and cannot safely manipulate state.
+        * They therefore cannot possibly be special, so just leave.
+        */
+       if (in_nmi())
+               return;
+
+       local_irq_save(flags);
+
+       /*
+        * If RCU core is waiting for this CPU to exit critical section,
+        * let it know that we have done so.
+        */
+       special = t->rcu_read_unlock_special;
+       if (special & RCU_READ_UNLOCK_NEED_QS)
+               rcu_preempt_cpu_qs();
+
+       /* Hardware IRQ handlers cannot block. */
+       if (in_irq()) {
+               local_irq_restore(flags);
+               return;
+       }
+
+       /* Clean up if blocked during RCU read-side critical section. */
+       if (special & RCU_READ_UNLOCK_BLOCKED) {
+               t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+
+               /*
+                * Remove this task from the ->blkd_tasks list and adjust
+                * any pointers that might have been referencing it.
+                */
+               empty = !rcu_preempt_blocked_readers_cgp();
+               empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
+               np = t->rcu_node_entry.next;
+               if (np == &rcu_preempt_ctrlblk.blkd_tasks)
+                       np = NULL;
+               list_del(&t->rcu_node_entry);
+               if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
+                       rcu_preempt_ctrlblk.gp_tasks = np;
+               if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
+                       rcu_preempt_ctrlblk.exp_tasks = np;
+               INIT_LIST_HEAD(&t->rcu_node_entry);
+
+               /*
+                * If this was the last task on the current list, and if
+                * we aren't waiting on the CPU, report the quiescent state
+                * and start a new grace period if needed.
+                */
+               if (!empty && !rcu_preempt_blocked_readers_cgp()) {
+                       rcu_preempt_cpu_qs();
+                       rcu_preempt_start_gp();
+               }
+
+               /*
+                * If this was the last task on the expedited lists,
+                * then we need to wake up the waiting task.
+                */
+               if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
+                       rcu_report_exp_done();
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+       struct task_struct *t = current;
+
+       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
+       --t->rcu_read_lock_nesting;
+       barrier();  /* decrement before load of ->rcu_read_unlock_special */
+       if (t->rcu_read_lock_nesting == 0 &&
+           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+               rcu_read_unlock_special(t);
+#ifdef CONFIG_PROVE_LOCKING
+       WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * Check for a quiescent state from the current CPU.  When a task blocks,
+ * the task is recorded in the rcu_preempt_ctrlblk structure, which is
+ * checked elsewhere.  This is called from the scheduling-clock interrupt.
+ *
+ * Caller must disable hard irqs.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+       struct task_struct *t = current;
+
+       if (rcu_preempt_gp_in_progress() &&
+           (!rcu_preempt_running_reader() ||
+            !rcu_cpu_blocking_cur_gp()))
+               rcu_preempt_cpu_qs();
+       if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
+           rcu_preempt_ctrlblk.rcb.donetail)
+               raise_softirq(RCU_SOFTIRQ);
+       if (rcu_preempt_gp_in_progress() &&
+           rcu_cpu_blocking_cur_gp() &&
+           rcu_preempt_running_reader())
+               t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+}
+
+/*
+ * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
+ * update, so this is invoked from __rcu_process_callbacks() to
+ * handle that case.  Of course, it is invoked for all flavors of
+ * RCU, but RCU callbacks can appear only on one of the lists, and
+ * neither ->nexttail nor ->donetail can possibly be NULL, so there
+ * is no need for an explicit check.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+       if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
+               rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
+}
+
+/*
+ * Process callbacks for preemptible RCU.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+       __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+}
+
+/*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+       unsigned long flags;
+
+       debug_rcu_head_queue(head);
+       head->func = func;
+       head->next = NULL;
+
+       local_irq_save(flags);
+       *rcu_preempt_ctrlblk.nexttail = head;
+       rcu_preempt_ctrlblk.nexttail = &head->next;
+       rcu_preempt_start_gp();  /* checks to see if GP needed. */
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+void rcu_barrier(void)
+{
+       struct rcu_synchronize rcu;
+
+       init_rcu_head_on_stack(&rcu.head);
+       init_completion(&rcu.completion);
+       /* Will wake me after RCU finished. */
+       call_rcu(&rcu.head, wakeme_after_rcu);
+       /* Wait for it. */
+       wait_for_completion(&rcu.completion);
+       destroy_rcu_head_on_stack(&rcu.head);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       if (!rcu_scheduler_active)
+               return;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+       WARN_ON_ONCE(rcu_preempt_running_reader());
+       if (!rcu_preempt_blocked_readers_any())
+               return;
+
+       /* Once we get past the fastpath checks, same code as rcu_barrier(). */
+       rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static unsigned long sync_rcu_preempt_exp_count;
+static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(void)
+{
+       return rcu_preempt_ctrlblk.exp_tasks != NULL;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.
+ */
+static void rcu_report_exp_done(void)
+{
+       wake_up(&sync_rcu_preempt_exp_wq);
+}
+
+/*
+ * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
+ * is to rely on the fact that there is but one CPU, and that it is
+ * illegal for a task to invoke synchronize_rcu_expedited() while in a
+ * preemptible-RCU read-side critical section.  Therefore, any such
+ * critical sections must correspond to blocked tasks, which must therefore
+ * be on the ->blkd_tasks list.  So just record the current head of the
+ * list in the ->exp_tasks pointer, and wait for all tasks including and
+ * after the task pointed to by ->exp_tasks to drain.
+ */
+void synchronize_rcu_expedited(void)
+{
+       unsigned long flags;
+       struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
+       unsigned long snap;
+
+       barrier(); /* ensure prior action seen before grace period. */
+
+       WARN_ON_ONCE(rcu_preempt_running_reader());
+
+       /*
+        * Acquire lock so that there is only one preemptible RCU grace
+        * period in flight.  Of course, if someone does the expedited
+        * grace period for us while we are acquiring the lock, just leave.
+        */
+       snap = sync_rcu_preempt_exp_count + 1;
+       mutex_lock(&sync_rcu_preempt_exp_mutex);
+       if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
+               goto unlock_mb_ret; /* Others did our work for us. */
+
+       local_irq_save(flags);
+
+       /*
+        * All RCU readers have to already be on blkd_tasks because
+        * we cannot legally be executing in an RCU read-side critical
+        * section.
+        */
+
+       /* Snapshot current head of ->blkd_tasks list. */
+       rpcp->exp_tasks = rpcp->blkd_tasks.next;
+       if (rpcp->exp_tasks == &rpcp->blkd_tasks)
+               rpcp->exp_tasks = NULL;
+       local_irq_restore(flags);
+
+       /* Wait for tail of ->blkd_tasks list to drain. */
+       if (rcu_preempted_readers_exp())
+               wait_event(sync_rcu_preempt_exp_wq,
+                          !rcu_preempted_readers_exp());
+
+       /* Clean up and exit. */
+       barrier(); /* ensure expedited GP seen before counter increment. */
+       sync_rcu_preempt_exp_count++;
+unlock_mb_ret:
+       mutex_unlock(&sync_rcu_preempt_exp_mutex);
+       barrier(); /* ensure subsequent action seen after grace period. */
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
+ * Does preemptible RCU need the CPU to stay out of dynticks mode?
+ */
+int rcu_preempt_needs_cpu(void)
+{
+       if (!rcu_preempt_running_reader())
+               rcu_preempt_cpu_qs();
+       return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
+}
+
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so.  No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+       struct task_struct *t = current;
+
+       if (t->rcu_read_lock_nesting == 0)
+               return;
+       t->rcu_read_lock_nesting = 1;
+       rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to check.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to remove.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to process.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #include <linux/kernel_stat.h>
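A hedged sketch of the classic updater pattern the TINY_PREEMPT_RCU primitives above serve; struct cfg and the cfg_* names are illustrative assumptions:

    struct cfg {
            struct list_head list;
            int val;
    };
    static LIST_HEAD(cfg_list);
    static DEFINE_SPINLOCK(cfg_lock);

    static void cfg_remove(struct cfg *c)
    {
            spin_lock(&cfg_lock);
            list_del_rcu(&c->list);
            spin_unlock(&cfg_lock);
            synchronize_rcu();      /* now waits for preempted readers, too */
            kfree(c);
    }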
index 2e2726d790b98eff18d88d71dbeba8caf8ec7f7a..9d8e8fb2515f4e4801c214841a7f8c95b8b45ffe 100644 (file)
@@ -120,7 +120,7 @@ struct rcu_torture {
 };
 
 static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture *rcu_torture_current;
+static struct rcu_torture __rcu *rcu_torture_current;
 static long rcu_torture_current_version;
 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 static DEFINE_SPINLOCK(rcu_torture_lock);
@@ -153,8 +153,10 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 #define FULLSTOP_SHUTDOWN 1    /* System shutdown with rcutorture running. */
 #define FULLSTOP_RMMOD    2    /* Normal rmmod of rcutorture. */
 static int fullstop = FULLSTOP_RMMOD;
-DEFINE_MUTEX(fullstop_mutex);  /* Protect fullstop transitions and spawning */
-                               /*  of kthreads. */
+/*
+ * Protect fullstop transitions and spawning of kthreads.
+ */
+static DEFINE_MUTEX(fullstop_mutex);
 
 /*
  * Detect and respond to a system shutdown.
@@ -303,6 +305,10 @@ static void rcu_read_delay(struct rcu_random_state *rrsp)
                mdelay(longdelay_ms);
        if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
                udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
+               preempt_schedule();  /* No QS if preempt_disable() in effect */
+#endif
 }
 
 static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -536,6 +542,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
+       else
+               rcu_read_delay(rrsp);
 }
 
 static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
@@ -731,7 +739,8 @@ rcu_torture_writer(void *arg)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
-               old_rp = rcu_torture_current;
+               old_rp = rcu_dereference_check(rcu_torture_current,
+                                              current == writer_task);
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
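A hedged sketch of what the new __rcu annotation above buys: sparse can flag plain loads and stores, so accesses go through the RCU accessors, with updater-side reads legitimized the same way as in the writer hunk above. example_gp and example_publish() are illustrative names:

    static struct rcu_torture __rcu *example_gp;

    static void example_publish(struct rcu_torture *newp,
                                struct task_struct *writer)
    {
            struct rcu_torture *old;

            old = rcu_dereference_check(example_gp, current == writer);
            rcu_assign_pointer(example_gp, newp);
            /* old may be retired once a grace period has elapsed. */
    }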
index d5bc43976c5ad202fa41be0456797d40bdde0c0d..ccdc04c479815addc8dbacea69643174a4636670 100644 (file)
@@ -143,6 +143,11 @@ module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+module_param(rcu_cpu_stall_suppress, int, 0644);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
@@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
-int rcu_cpu_stall_panicking __read_mostly;
+int rcu_cpu_stall_suppress __read_mostly;
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
@@ -482,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        rcu_print_task_stall(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
-       /* OK, time to rat on our buddy... */
-
+       /*
+        * OK, time to rat on our buddy...
+        * See Documentation/RCU/stallwarn.txt for info on how to debug
+        * RCU CPU stall warnings.
+        */
        printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
               rsp->name);
        rcu_for_each_leaf_node(rsp, rnp) {
@@ -512,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       /*
+        * OK, time to rat on ourselves...
+        * See Documentation/RCU/stallwarn.txt for info on how to debug
+        * RCU CPU stall warnings.
+        */
        printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
               rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
        trigger_all_cpu_backtrace();
@@ -530,11 +543,11 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        long delta;
        struct rcu_node *rnp;
 
-       if (rcu_cpu_stall_panicking)
+       if (rcu_cpu_stall_suppress)
                return;
-       delta = jiffies - rsp->jiffies_stall;
+       delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
        rnp = rdp->mynode;
-       if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+       if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
@@ -548,10 +561,26 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 
 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
 {
-       rcu_cpu_stall_panicking = 1;
+       rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
 }
 
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+       rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+       rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+       rcu_preempt_stall_reset();
+}
+
 static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
 };
@@ -571,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 }
 
+void rcu_cpu_stall_reset(void)
+{
+}
+
 static void __init check_cpu_stall_init(void)
 {
 }
@@ -712,7 +745,7 @@ static void
 rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
-       struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
@@ -960,7 +993,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
 {
        int i;
-       struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+       struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
        if (rdp->nxtlist == NULL)
                return;  /* irqs disabled, so comparison is stable. */
@@ -971,6 +1004,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
        rsp->orphan_qlen += rdp->qlen;
+       rdp->n_cbs_orphaned += rdp->qlen;
        rdp->qlen = 0;
        raw_spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
 }
@@ -984,7 +1018,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
        struct rcu_data *rdp;
 
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
-       rdp = rsp->rda[smp_processor_id()];
+       rdp = this_cpu_ptr(rsp->rda);
        if (rsp->orphan_cbs_list == NULL) {
                raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
                return;
@@ -992,6 +1026,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
        *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
        rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
        rdp->qlen += rsp->orphan_qlen;
+       rdp->n_cbs_adopted += rsp->orphan_qlen;
        rsp->orphan_cbs_list = NULL;
        rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
        rsp->orphan_qlen = 0;
@@ -1007,7 +1042,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
        unsigned long flags;
        unsigned long mask;
        int need_report = 0;
-       struct rcu_data *rdp = rsp->rda[cpu];
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp;
 
        /* Exclude any attempts to start a new grace period. */
@@ -1123,6 +1158,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Update count, and requeue any remaining callbacks. */
        rdp->qlen -= count;
+       rdp->n_cbs_invoked += count;
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
@@ -1226,7 +1262,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
                cpu = rnp->grplo;
                bit = 1;
                for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
-                       if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+                       if ((rnp->qsmask & bit) != 0 &&
+                           f(per_cpu_ptr(rsp->rda, cpu)))
                                mask |= bit;
                }
                if (mask != 0) {
@@ -1402,7 +1439,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         * a quiescent state betweentimes.
         */
        local_irq_save(flags);
-       rdp = rsp->rda[smp_processor_id()];
+       rdp = this_cpu_ptr(rsp->rda);
        rcu_process_gp_end(rsp, rdp);
        check_for_new_grace_period(rsp, rdp);
 
@@ -1701,7 +1738,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
        int i;
-       struct rcu_data *rdp = rsp->rda[cpu];
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Set up local state, ensuring consistent view of global state. */
@@ -1729,7 +1766,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 {
        unsigned long flags;
        unsigned long mask;
-       struct rcu_data *rdp = rsp->rda[cpu];
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Set up local state, ensuring consistent view of global state. */
@@ -1865,7 +1902,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
  */
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(struct rcu_state *rsp,
+               struct rcu_data __percpu *rda)
 {
        static char *buf[] = { "rcu_node_level_0",
                               "rcu_node_level_1",
@@ -1918,37 +1956,23 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                }
        }
 
+       rsp->rda = rda;
        rnp = rsp->level[NUM_RCU_LVLS - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
                        rnp++;
-               rsp->rda[i]->mynode = rnp;
+               per_cpu_ptr(rsp->rda, i)->mynode = rnp;
                rcu_boot_init_percpu_data(i, rsp);
        }
 }
 
-/*
- * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
- * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
- * structure.
- */
-#define RCU_INIT_FLAVOR(rsp, rcu_data) \
-do { \
-       int i; \
-       \
-       for_each_possible_cpu(i) { \
-               (rsp)->rda[i] = &per_cpu(rcu_data, i); \
-       } \
-       rcu_init_one(rsp); \
-} while (0)
-
 void __init rcu_init(void)
 {
        int cpu;
 
        rcu_bootup_announce();
-       RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
-       RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+       rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+       rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
index 14c040b18ed04a23f34448d9278d9ad55e5ab0c1..91d4170c5c13afd2e8997bd59b28e7cc2a4385e8 100644 (file)
@@ -202,6 +202,9 @@ struct rcu_data {
        long            qlen;           /* # of queued callbacks */
        long            qlen_last_fqs_check;
                                        /* qlen at last check for QS forcing */
+       unsigned long   n_cbs_invoked;  /* count of RCU cbs invoked. */
+       unsigned long   n_cbs_orphaned; /* RCU cbs sent to orphanage. */
+       unsigned long   n_cbs_adopted;  /* RCU cbs adopted from orphanage. */
        unsigned long   n_force_qs_snap;
                                        /* did other CPU force QS recently? */
        long            blimit;         /* Upper limit on a processed batch */
@@ -254,19 +257,23 @@ struct rcu_data {
 #define RCU_STALL_DELAY_DELTA         0
 #endif
 
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_CHECK   (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
+                                       RCU_STALL_DELAY_DELTA)
                                                /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
                                                /* for rsp->jiffies_stall */
 #define RCU_STALL_RAT_DELAY            2       /* Allow other CPUs time */
                                                /*  to take at least one */
                                                /*  scheduling clock irq */
                                                /*  before ratting on them. */
 
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE
+#define RCU_CPU_STALL_SUPPRESS_INIT 0
+#else
+#define RCU_CPU_STALL_SUPPRESS_INIT 1
+#endif
 
-#define ULONG_CMP_GE(a, b)     (ULONG_MAX / 2 >= (a) - (b))
-#define ULONG_CMP_LT(a, b)     (ULONG_MAX / 2 < (a) - (b))
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
  * RCU global state, including node hierarchy.  This hierarchy is
@@ -283,7 +290,7 @@ struct rcu_state {
        struct rcu_node *level[NUM_RCU_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
        u8 levelspread[NUM_RCU_LVLS];           /* kids/node in each level. */
-       struct rcu_data *rda[NR_CPUS];          /* array of rdp pointers. */
+       struct rcu_data __percpu *rda;          /* pointer to percpu rcu_data. */
 
        /* The following fields are guarded by the root rcu_node's lock. */
 
@@ -365,6 +372,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
+static void rcu_preempt_stall_reset(void);
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
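A worked example for the reworked stall-timeout macros above, assuming CONFIG_RCU_CPU_STALL_TIMEOUT=60, HZ=250 and RCU_STALL_DELAY_DELTA=0:

    RCU_SECONDS_TILL_STALL_CHECK   = 60 * 250         = 15000 jiffies
    RCU_SECONDS_TILL_STALL_RECHECK = 3 * 15000 + 30   = 45030 jiffies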
index 0e4f420245d97369b7fdf6bb99815789ffe13233..71a4147473f95f51d2b2e88db4c14372dafe375f 100644 (file)
@@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void)
        printk(KERN_INFO
               "\tRCU-based detection of stalled CPUs is disabled.\n");
 #endif
-#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
@@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = rcu_preempt_state.rda[cpu];
+               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu)
  */
 void __rcu_read_lock(void)
 {
-       ACCESS_ONCE(current->rcu_read_lock_nesting)++;
+       current->rcu_read_lock_nesting++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -344,7 +344,9 @@ void __rcu_read_unlock(void)
        struct task_struct *t = current;
 
        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
+       --t->rcu_read_lock_nesting;
+       barrier();  /* decrement before load of ->rcu_read_unlock_special */
+       if (t->rcu_read_lock_nesting == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
 #ifdef CONFIG_PROVE_LOCKING
@@ -417,6 +419,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
        }
 }
 
+/*
+ * Suppress preemptible RCU's CPU stall warnings by pushing the
+ * time of the next stall-warning message comfortably far into the
+ * future.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+       rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+}
+
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
@@ -546,9 +558,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
  *
  * Control will return to the caller some time after a full grace
  * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
+ * read-side critical sections have completed.  Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting.  RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
  */
 void synchronize_rcu(void)
 {
@@ -771,7 +785,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void)
  */
 static void __init __rcu_init_preempt(void)
 {
-       RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+       rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
 /*
@@ -865,6 +879,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 {
 }
 
+/*
+ * Because preemptible RCU does not exist, there is no need to suppress
+ * its CPU stall warnings.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+}
+
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 /*
@@ -918,15 +940,6 @@ static void rcu_preempt_process_callbacks(void)
 {
 }
 
-/*
- * In classic RCU, call_rcu() is just call_rcu_sched().
- */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
-       call_rcu_sched(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
 /*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptable RCU does not exist, map to rcu-sched.
index 36c95b45738ed7f74fb78b901ddcff7fc1488498..d15430b9d122f4d619e76fb6b5069aa1f494a575 100644 (file)
@@ -64,7 +64,9 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
+       seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit);
+       seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
+                  rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
 
 #define PRINT_RCU_DATA(name, func, m) \
@@ -119,7 +121,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
+       seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit);
+       seq_printf(m, ",%lu,%lu,%lu\n",
+                  rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
@@ -128,7 +132,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 #ifdef CONFIG_NO_HZ
        seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
-       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
+       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
        seq_puts(m, "\"rcu_preempt:\"\n");
        PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
@@ -262,7 +266,7 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
        struct rcu_data *rdp;
 
        for_each_possible_cpu(cpu) {
-               rdp = rsp->rda[cpu];
+               rdp = per_cpu_ptr(rsp->rda, cpu);
                if (rdp->beenonline)
                        print_one_rcu_pending(m, rdp);
        }
index 09b574e7f4df7c14615d104c3fe736f7c1be0847..d42992bccdfae88569559f3e88f81bbcea8e9494 100644 (file)
@@ -426,9 +426,7 @@ struct root_domain {
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
-#ifdef CONFIG_SMP
        struct cpupri cpupri;
-#endif
 };
 
 /*
@@ -437,7 +435,7 @@ struct root_domain {
  */
 static struct root_domain def_root_domain;
 
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * This is the main, per-CPU runqueue data structure.
@@ -488,11 +486,12 @@ struct rq {
         */
        unsigned long nr_uninterruptible;
 
-       struct task_struct *curr, *idle;
+       struct task_struct *curr, *idle, *stop;
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 
        u64 clock;
+       u64 clock_task;
 
        atomic_t nr_iowait;
 
@@ -520,6 +519,10 @@ struct rq {
        u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+       u64 prev_irq_time;
+#endif
+
        /* calc_load related fields */
        unsigned long calc_load_update;
        long calc_load_active;
@@ -643,10 +646,22 @@ static inline struct task_group *task_group(struct task_struct *p)
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
+
 inline void update_rq_clock(struct rq *rq)
 {
-       if (!rq->skip_clock_update)
-               rq->clock = sched_clock_cpu(cpu_of(rq));
+       if (!rq->skip_clock_update) {
+               int cpu = cpu_of(rq);
+               u64 irq_time;
+
+               rq->clock = sched_clock_cpu(cpu);
+               irq_time = irq_time_cpu(cpu);
+               if (rq->clock - irq_time > rq->clock_task)
+                       rq->clock_task = rq->clock - irq_time;
+
+               sched_irq_time_avg_update(rq, irq_time);
+       }
 }
 
 /*
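
As a stand-alone illustration of the clock_task bookkeeping introduced above (plain user-space C with invented names, not kernel code): clock_task advances only by the non-irq share of each clock delta, and the comparison keeps it monotonic.

#include <stdint.h>
#include <stdio.h>

static uint64_t clk, clk_task;

/* Mirrors the update_rq_clock() hunk: clk_task tracks clk minus the
 * cumulative irq time, and never moves backwards. */
static void update_clocks(uint64_t now, uint64_t irq_time)
{
	clk = now;
	if (clk - irq_time > clk_task)
		clk_task = clk - irq_time;
}

int main(void)
{
	update_clocks(1000, 0);		/* no irq time accrued yet */
	update_clocks(2000, 600);	/* 600 units spent in irq so far */
	printf("clk=%llu clk_task=%llu\n",
	       (unsigned long long)clk, (unsigned long long)clk_task);
	return 0;	/* prints clk=2000 clk_task=1400 */
}
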
@@ -723,7 +738,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
        char buf[64];
-       char *cmp = buf;
+       char *cmp;
        int neg = 0;
        int i;
 
@@ -734,6 +749,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                return -EFAULT;
 
        buf[cnt] = 0;
+       cmp = strstrip(buf);
 
        if (strncmp(buf, "NO_", 3) == 0) {
                neg = 1;
@@ -741,9 +757,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        }
 
        for (i = 0; sched_feat_names[i]; i++) {
-               int len = strlen(sched_feat_names[i]);
-
-               if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+               if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
@@ -1294,6 +1308,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -1836,7 +1854,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 static const struct sched_class rt_sched_class;
 
-#define sched_class_highest (&rt_sched_class)
+#define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
 
@@ -1854,12 +1872,6 @@ static void dec_nr_running(struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-       if (task_has_rt_policy(p)) {
-               p->se.load.weight = 0;
-               p->se.load.inv_weight = WMULT_CONST;
-               return;
-       }
-
        /*
         * SCHED_IDLE tasks get minimal weight:
         */
@@ -1913,13 +1925,132 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
        dec_nr_running(rq);
 }
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+/*
+ * There are no locks covering the per-CPU hardirq/softirq time.
+ * It is only modified in account_system_vtime, on the corresponding
+ * CPU, with interrupts disabled, so writes are safe.
+ * It is read and saved off onto struct rq in update_rq_clock().
+ * A remote CPU may thus read this CPU's irq time while it races with
+ * irq/account_system_vtime here, and see either the old or the new
+ * value (or a semi-updated value on 32 bit), with the side effect of
+ * accounting a slice of irq time to the wrong task when an irq is in
+ * progress while we read rq->clock. That is a worthy compromise
+ * compared to taking a lock on every irq in account_system_time.
+ */
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+       sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+       sched_clock_irqtime = 0;
+}
+
+static u64 irq_time_cpu(int cpu)
+{
+       if (!sched_clock_irqtime)
+               return 0;
+
+       return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+
+void account_system_vtime(struct task_struct *curr)
+{
+       unsigned long flags;
+       int cpu;
+       u64 now, delta;
+
+       if (!sched_clock_irqtime)
+               return;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+       now = sched_clock_cpu(cpu);
+       delta = now - per_cpu(irq_start_time, cpu);
+       per_cpu(irq_start_time, cpu) = now;
+       /*
+        * We do not account for softirq time from ksoftirqd here.
+        * We want to keep accounting softirq time to the ksoftirqd thread
+        * in that case, so as not to confuse the scheduler with a special
+        * task that does not consume any time but still wants to run.
+        */
+       if (hardirq_count())
+               per_cpu(cpu_hardirq_time, cpu) += delta;
+       else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+               per_cpu(cpu_softirq_time, cpu) += delta;
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+       if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+               u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+               rq->prev_irq_time = curr_irq_time;
+               sched_rt_avg_update(rq, delta_irq);
+       }
+}
+
+#else
+
+static u64 irq_time_cpu(int cpu)
+{
+       return 0;
+}
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
+#endif
+
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
+#include "sched_stoptask.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
 
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+       struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+       if (stop) {
+               /*
+                * Make it appear like a SCHED_FIFO task; that is something
+                * userspace knows about and won't get confused by.
+                *
+                * Also, it will make PI more or less work without too
+                * much confusion -- but then, stop work should not
+                * rely on PI working anyway.
+                */
+               sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
+
+               stop->sched_class = &stop_sched_class;
+       }
+
+       cpu_rq(cpu)->stop = stop;
+
+       if (old_stop) {
+               /*
+                * Reset it back to a normal scheduling class so that
+                * it can die in pieces.
+                */
+               old_stop->sched_class = &rt_sched_class;
+       }
+}
+
 /*
  * __normal_prio - return the priority that is based on the static prio
  */
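
A minimal user-space analogue of the lockless scheme described in the comment above (invented names, not kernel code): each slot has exactly one writer, its owning CPU, so remote readers can go lockless and merely tolerate a stale or, on 32 bit, torn value.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Single-writer per-CPU counters: only CPU 'cpu' updates its own slot
 * (with interrupts off in the kernel), so no lock is needed. */
static uint64_t fake_irq_time[NR_CPUS];

static void owner_account(int cpu, uint64_t delta)
{
	fake_irq_time[cpu] += delta;	/* owner-only write */
}

static uint64_t remote_read(int cpu)
{
	return fake_irq_time[cpu];	/* lockless, possibly stale */
}

int main(void)
{
	owner_account(0, 1000);
	printf("cpu0 irq time: %llu\n",
	       (unsigned long long)remote_read(0));
	return 0;
}
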
@@ -1999,6 +2130,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        if (p->sched_class != &fair_sched_class)
                return 0;
 
+       if (unlikely(p->policy == SCHED_IDLE))
+               return 0;
+
        /*
         * Buddy candidates are cache hot:
         */
@@ -2848,14 +2982,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
         */
        arch_start_context_switch(prev);
 
-       if (likely(!mm)) {
+       if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (likely(!prev->mm)) {
+       if (!prev->mm) {
                prev->active_mm = NULL;
                rq->prev_mm = oldmm;
        }
@@ -3182,6 +3316,8 @@ static void update_cpu_load(struct rq *this_rq)
 
                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }
+
+       sched_avg_update(this_rq);
 }
 
 static void update_cpu_load_active(struct rq *this_rq)
@@ -3242,7 +3378,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 
        if (task_current(rq, p)) {
                update_rq_clock(rq);
-               ns = rq->clock - p->se.exec_start;
+               ns = rq->clock_task - p->se.exec_start;
                if ((s64)ns < 0)
                        ns = 0;
        }
@@ -3391,7 +3527,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
        tmp = cputime_to_cputime64(cputime);
        if (hardirq_count() - hardirq_offset)
                cpustat->irq = cputime64_add(cpustat->irq, tmp);
-       else if (softirq_count())
+       else if (in_serving_softirq())
                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
        else
                cpustat->system = cputime64_add(cpustat->system, tmp);
@@ -3507,9 +3643,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * utime);
+               temp *= utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
@@ -3540,9 +3676,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * cputime.utime);
+               temp *= cputime.utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
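
The subtlety in these two hunks: when cputime_t is 32 bits wide, rtime * utime is computed in 32-bit arithmetic and overflows before the (u64) cast; assigning rtime to a u64 first promotes the whole multiply. A hedged stand-alone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rtime = 100000, utime = 100000;	/* product needs ~34 bits */

	uint64_t bad  = (uint64_t)(rtime * utime);	/* 32-bit multiply, wraps */
	uint64_t temp = rtime;
	temp *= utime;					/* 64-bit multiply */

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)temp);
	return 0;	/* bad=1410065408 good=10000000000 */
}
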
@@ -3578,7 +3714,7 @@ void scheduler_tick(void)
        curr->sched_class->task_tick(rq, curr, 0);
        raw_spin_unlock(&rq->lock);
 
-       perf_event_task_tick(curr);
+       perf_event_task_tick();
 
 #ifdef CONFIG_SMP
        rq->idle_at_tick = idle_cpu(cpu);
@@ -3717,17 +3853,13 @@ pick_next_task(struct rq *rq)
                        return p;
        }
 
-       class = sched_class_highest;
-       for ( ; ; ) {
+       for_each_class(class) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
-               /*
-                * Will never be NULL as the idle class always
-                * returns a non-NULL p:
-                */
-               class = class->next;
        }
+
+       BUG(); /* the idle class will always have a runnable task */
 }
 
 /*
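
A hedged stand-alone analogue of the rewritten loop (invented names, not the kernel types): classes are tried from highest to lowest priority, the idle class at the tail always yields a task, and falling off the list is therefore a bug.

#include <stddef.h>
#include <stdio.h>

struct klass {
	const struct klass *next;
	const char *(*pick)(void);
};

static const char *pick_none(void) { return NULL; }
static const char *pick_idle(void) { return "idle task"; }

static const struct klass idle_class = { NULL, pick_idle };
static const struct klass rt_class   = { &idle_class, pick_none };
static const struct klass stop_class = { &rt_class, pick_none };

int main(void)
{
	const struct klass *c;

	for (c = &stop_class; c; c = c->next) {	/* for_each_class() */
		const char *p = c->pick();
		if (p) {
			printf("picked: %s\n", p);
			return 0;
		}
	}
	return 1;	/* BUG(): the idle class must always have a task */
}
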
@@ -4352,6 +4484,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        rq = task_rq_lock(p, &flags);
 
+       trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
        on_rq = p->se.on_rq;
@@ -4639,7 +4772,7 @@ recheck:
        }
 
        if (user) {
-               retval = security_task_setscheduler(p, policy, param);
+               retval = security_task_setscheduler(p);
                if (retval)
                        return retval;
        }
@@ -4655,6 +4788,15 @@ recheck:
         */
        rq = __task_rq_lock(p);
 
+       /*
+        * Changing the policy of the stop threads is a very bad idea.
+        */
+       if (p == rq->stop) {
+               __task_rq_unlock(rq);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+               return -EINVAL;
+       }
+
 #ifdef CONFIG_RT_GROUP_SCHED
        if (user) {
                /*
@@ -4881,13 +5023,13 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
                goto out_unlock;
 
-       retval = security_task_setscheduler(p, 0, NULL);
+       retval = security_task_setscheduler(p);
        if (retval)
                goto out_unlock;
 
        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, in_mask, cpus_allowed);
- again:
+again:
        retval = set_cpus_allowed_ptr(p, new_mask);
 
        if (!retval) {
@@ -5331,7 +5473,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+       /*
+        * We have a chicken-and-egg problem: even though we are
+        * holding rq->lock, the cpu isn't yet set to this cpu, so the
+        * lockdep check in task_group() will fail.
+        *
+        * This is a similar case to sched_fork(); alternatively we could
+        * use task_rq_lock() here and obtain the other rq->lock.
+        *
+        * Silence PROVE_RCU.
+        */
+       rcu_read_lock();
        __set_task_cpu(idle, cpu);
+       rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
@@ -6508,6 +6662,7 @@ struct s_data {
        cpumask_var_t           nodemask;
        cpumask_var_t           this_sibling_map;
        cpumask_var_t           this_core_map;
+       cpumask_var_t           this_book_map;
        cpumask_var_t           send_covered;
        cpumask_var_t           tmpmask;
        struct sched_group      **sched_group_nodes;
@@ -6519,6 +6674,7 @@ enum s_alloc {
        sa_rootdomain,
        sa_tmpmask,
        sa_send_covered,
+       sa_this_book_map,
        sa_this_core_map,
        sa_this_sibling_map,
        sa_nodemask,
@@ -6554,31 +6710,48 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
-#endif /* CONFIG_SCHED_MC */
 
-#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *mask)
 {
        int group;
-
+#ifdef CONFIG_SCHED_SMT
        cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
        group = cpumask_first(mask);
+#else
+       group = cpu;
+#endif
        if (sg)
                *sg = &per_cpu(sched_group_core, group).sg;
        return group;
 }
-#elif defined(CONFIG_SCHED_MC)
+#endif /* CONFIG_SCHED_MC */
+
+/*
+ * book sched-domains:
+ */
+#ifdef CONFIG_SCHED_BOOK
+static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
+
 static int
-cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
-                 struct sched_group **sg, struct cpumask *unused)
+cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
+                 struct sched_group **sg, struct cpumask *mask)
 {
+       int group = cpu;
+#ifdef CONFIG_SCHED_MC
+       cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
+       group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+       cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
+       group = cpumask_first(mask);
+#endif
        if (sg)
-               *sg = &per_cpu(sched_group_core, cpu).sg;
-       return cpu;
+               *sg = &per_cpu(sched_group_book, group).sg;
+       return group;
 }
-#endif
+#endif /* CONFIG_SCHED_BOOK */
 
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
@@ -6588,7 +6761,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *mask)
 {
        int group;
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_BOOK
+       cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
+       group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_MC)
        cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
        group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -6849,6 +7025,9 @@ SD_INIT_FUNC(CPU)
 #ifdef CONFIG_SCHED_MC
  SD_INIT_FUNC(MC)
 #endif
+#ifdef CONFIG_SCHED_BOOK
+ SD_INIT_FUNC(BOOK)
+#endif
 
 static int default_relax_domain_level = -1;
 
@@ -6898,6 +7077,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
                free_cpumask_var(d->tmpmask); /* fall through */
        case sa_send_covered:
                free_cpumask_var(d->send_covered); /* fall through */
+       case sa_this_book_map:
+               free_cpumask_var(d->this_book_map); /* fall through */
        case sa_this_core_map:
                free_cpumask_var(d->this_core_map); /* fall through */
        case sa_this_sibling_map:
@@ -6944,8 +7125,10 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                return sa_nodemask;
        if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
                return sa_this_sibling_map;
-       if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+       if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
                return sa_this_core_map;
+       if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+               return sa_this_book_map;
        if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
                return sa_send_covered;
        d->rd = alloc_rootdomain();
@@ -7003,6 +7186,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
        return sd;
 }
 
+static struct sched_domain *__build_book_sched_domain(struct s_data *d,
+       const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+       struct sched_domain *parent, int i)
+{
+       struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_BOOK
+       sd = &per_cpu(book_domains, i).sd;
+       SD_INIT(sd, BOOK);
+       set_domain_attribute(sd, attr);
+       cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
+       sd->parent = parent;
+       parent->child = sd;
+       cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+       return sd;
+}
+
 static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
        const struct cpumask *cpu_map, struct sched_domain_attr *attr,
        struct sched_domain *parent, int i)
@@ -7059,6 +7259,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
                                                &cpu_to_core_group,
                                                d->send_covered, d->tmpmask);
                break;
+#endif
+#ifdef CONFIG_SCHED_BOOK
+       case SD_LV_BOOK: /* set up book groups */
+               cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
+               if (cpu == cpumask_first(d->this_book_map))
+                       init_sched_build_groups(d->this_book_map, cpu_map,
+                                               &cpu_to_book_group,
+                                               d->send_covered, d->tmpmask);
+               break;
 #endif
        case SD_LV_CPU: /* set up physical groups */
                cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
@@ -7107,12 +7316,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
                sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
                sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+               sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
                sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
                sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
        }
 
        for_each_cpu(i, cpu_map) {
                build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+               build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
                build_sched_groups(&d, SD_LV_MC, cpu_map, i);
        }
 
@@ -7143,6 +7354,12 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                init_sched_groups_power(i, sd);
        }
 #endif
+#ifdef CONFIG_SCHED_BOOK
+       for_each_cpu(i, cpu_map) {
+               sd = &per_cpu(book_domains, i).sd;
+               init_sched_groups_power(i, sd);
+       }
+#endif
 
        for_each_cpu(i, cpu_map) {
                sd = &per_cpu(phys_domains, i).sd;
@@ -7168,6 +7385,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
                sd = &per_cpu(core_domains, i).sd;
+#elif defined(CONFIG_SCHED_BOOK)
+               sd = &per_cpu(book_domains, i).sd;
 #else
                sd = &per_cpu(phys_domains, i).sd;
 #endif
@@ -8072,9 +8291,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
        return 1;
 
- err_free_rq:
+err_free_rq:
        kfree(cfs_rq);
- err:
+err:
        return 0;
 }
 
@@ -8162,9 +8381,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
        return 1;
 
- err_free_rq:
+err_free_rq:
        kfree(rt_rq);
- err:
+err:
        return 0;
 }
 
@@ -8522,7 +8741,7 @@ static int tg_set_bandwidth(struct task_group *tg,
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- unlock:
+unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
 
index ab661ebc4895a8471ecc808825477cf0c3558444..933f3d1b62ea0affb63767f87152545f7659dc6f 100644 (file)
@@ -25,7 +25,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -519,7 +519,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 static void update_curr(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq->curr;
-       u64 now = rq_of(cfs_rq)->clock;
+       u64 now = rq_of(cfs_rq)->clock_task;
        unsigned long delta_exec;
 
        if (unlikely(!curr))
@@ -602,7 +602,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
        /*
         * We are starting a new run period:
         */
-       se->exec_start = rq_of(cfs_rq)->clock;
+       se->exec_start = rq_of(cfs_rq)->clock_task;
 }
 
 /**************************************************
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int load_idx)
 {
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+       struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
-                       this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
@@ -1765,6 +1764,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
        set_task_cpu(p, this_cpu);
        activate_task(this_rq, p, 0);
        check_preempt_curr(this_rq, p, 0);
+
+       /* re-arm NEWIDLE balancing when moving tasks */
+       src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
+       this_rq->idle_stamp = 0;
 }
 
 /*
@@ -1799,7 +1802,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
         * 2) too many balance attempts have failed.
         */
 
-       tsk_cache_hot = task_hot(p, rq->clock, sd);
+       tsk_cache_hot = task_hot(p, rq->clock_task, sd);
        if (!tsk_cache_hot ||
                sd->nr_balance_failed > sd->cache_nice_tries) {
 #ifdef CONFIG_SCHEDSTATS
@@ -2031,12 +2034,14 @@ struct sd_lb_stats {
        unsigned long this_load;
        unsigned long this_load_per_task;
        unsigned long this_nr_running;
+       unsigned long this_has_capacity;
 
        /* Statistics of the busiest group */
        unsigned long max_load;
        unsigned long busiest_load_per_task;
        unsigned long busiest_nr_running;
        unsigned long busiest_group_capacity;
+       unsigned long busiest_has_capacity;
 
        int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2059,6 +2064,7 @@ struct sg_lb_stats {
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long group_capacity;
        int group_imb; /* Is there an imbalance in the group ? */
+       int group_has_capacity; /* Is there extra capacity in the group? */
 };
 
 /**
@@ -2268,10 +2274,14 @@ unsigned long scale_rt_power(int cpu)
        struct rq *rq = cpu_rq(cpu);
        u64 total, available;
 
-       sched_avg_update(rq);
-
        total = sched_avg_period() + (rq->clock - rq->age_stamp);
-       available = total - rq->rt_avg;
+
+       if (unlikely(total < rq->rt_avg)) {
+               /* Ensures that power won't end up being negative */
+               available = 0;
+       } else {
+               available = total - rq->rt_avg;
+       }
 
        if (unlikely((s64)total < SCHED_LOAD_SCALE))
                total = SCHED_LOAD_SCALE;
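
Why the new guard matters: total and rt_avg are unsigned, so if rt_avg ever exceeds total the subtraction wraps to a huge value rather than going negative. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = 100, rt_avg = 150;

	uint64_t wrapped = total - rt_avg;	/* wraps to ~2^64 */
	uint64_t guarded = total < rt_avg ? 0 : total - rt_avg;

	printf("wrapped=%llu guarded=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)guarded);
	return 0;	/* wrapped=18446744073709551566 guarded=0 */
}
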
@@ -2381,7 +2391,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
 {
-       unsigned long load, max_cpu_load, min_cpu_load;
+       unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
        int i;
        unsigned int balance_cpu = -1, first_idle_cpu = 0;
        unsigned long avg_load_per_task = 0;
@@ -2392,6 +2402,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        /* Tally up the load of all CPUs in the group */
        max_cpu_load = 0;
        min_cpu_load = ~0UL;
+       max_nr_running = 0;
 
        for_each_cpu_and(i, sched_group_cpus(group), cpus) {
                struct rq *rq = cpu_rq(i);
@@ -2409,8 +2420,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                        load = target_load(i, load_idx);
                } else {
                        load = source_load(i, load_idx);
-                       if (load > max_cpu_load)
+                       if (load > max_cpu_load) {
                                max_cpu_load = load;
+                               max_nr_running = rq->nr_running;
+                       }
                        if (min_cpu_load > load)
                                min_cpu_load = load;
                }
@@ -2450,13 +2463,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if (sgs->sum_nr_running)
                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
-       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+       if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
-       sgs->group_capacity =
-               DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+       sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
        if (!sgs->group_capacity)
                sgs->group_capacity = fix_small_capacity(sd, group);
+
+       if (sgs->group_capacity > sgs->sum_nr_running)
+               sgs->group_has_capacity = 1;
 }
 
 /**
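
A hedged model of the new capacity bookkeeping (the constant mirrors the kernel's usual SCHED_LOAD_SCALE of 1024, but this is a stand-alone sketch): capacity is the group's cpu_power rounded to whole CPUs, and the group "has capacity" while it runs fewer tasks than that.

#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long cpu_power = 2 * SCHED_LOAD_SCALE;	/* two full CPUs */
	unsigned long nr_running = 1;

	unsigned long capacity = DIV_ROUND_CLOSEST(cpu_power, SCHED_LOAD_SCALE);
	int has_capacity = capacity > nr_running;

	printf("capacity=%lu has_capacity=%d\n", capacity, has_capacity);
	return 0;	/* capacity=2 has_capacity=1 */
}
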
@@ -2545,9 +2560,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                /*
                 * In case the child domain prefers tasks go to siblings
                 * first, lower the sg capacity to one so that we'll try
-                * and move all the excess tasks away.
+                * and move all the excess tasks away. We lower the capacity
+                * of a group only if the local group has the capacity to fit
+                * these excess tasks, i.e. nr_running < group_capacity. The
+                * extra check prevents the case where you always pull from the
+                * heaviest group when it is already under-utilized (possible
+                * when a large-weight task outweighs the tasks on the system).
                 */
-               if (prefer_sibling)
+               if (prefer_sibling && !local_group && sds->this_has_capacity)
                        sgs.group_capacity = min(sgs.group_capacity, 1UL);
 
                if (local_group) {
@@ -2555,12 +2575,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        sds->this = sg;
                        sds->this_nr_running = sgs.sum_nr_running;
                        sds->this_load_per_task = sgs.sum_weighted_load;
+                       sds->this_has_capacity = sgs.group_has_capacity;
                } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
                        sds->max_load = sgs.avg_load;
                        sds->busiest = sg;
                        sds->busiest_nr_running = sgs.sum_nr_running;
                        sds->busiest_group_capacity = sgs.group_capacity;
                        sds->busiest_load_per_task = sgs.sum_weighted_load;
+                       sds->busiest_has_capacity = sgs.group_has_capacity;
                        sds->group_imb = sgs.group_imb;
                }
 
@@ -2757,6 +2779,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                return fix_small_imbalance(sds, this_cpu, imbalance);
 
 }
+
 /******* find_busiest_group() helpers end here *********************/
 
 /**
@@ -2808,6 +2831,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * 4) This group is busier than the average busyness at this
         *    sched_domain.
         * 5) The imbalance is within the specified limit.
+        *
+        * Note: when doing newidle balance, if the local group has excess
+        * capacity (i.e. nr_running < group_capacity) and the busiest group
+        * does not have any capacity, we force a load balance to pull tasks
+        * to the local group. In this case, we skip past checks 3, 4 and 5.
         */
        if (!(*balance))
                goto ret;
@@ -2819,6 +2847,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (!sds.busiest || sds.busiest_nr_running == 0)
                goto out_balanced;
 
+       /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+       if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
+                       !sds.busiest_has_capacity)
+               goto force_balance;
+
        if (sds.this_load >= sds.max_load)
                goto out_balanced;
 
@@ -2830,6 +2863,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
                goto out_balanced;
 
+force_balance:
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(&sds, this_cpu, imbalance);
        return sds.busiest;
@@ -3034,7 +3068,14 @@ redo:
 
        if (!ld_moved) {
                schedstat_inc(sd, lb_failed[idle]);
-               sd->nr_balance_failed++;
+               /*
+                * Increment the failure counter only on periodic balance.
+                * We do not want newidle balance, which can be very
+                * frequent, to pollute the failure counter and cause
+                * excessive cache_hot migrations and active balances.
+                */
+               if (idle != CPU_NEWLY_IDLE)
+                       sd->nr_balance_failed++;
 
                if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
                                        this_cpu)) {
@@ -3156,10 +3197,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
-               if (pulled_task) {
-                       this_rq->idle_stamp = 0;
+               if (pulled_task)
                        break;
-               }
        }
 
        raw_spin_lock(&this_rq->lock);
@@ -3633,7 +3672,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        if (time_before(now, nohz.next_balance))
                return 0;
 
-       if (!rq->nr_running)
+       if (rq->idle_at_tick)
                return 0;
 
        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
@@ -3754,8 +3793,11 @@ static void task_fork_fair(struct task_struct *p)
 
        update_rq_clock(rq);
 
-       if (unlikely(task_cpu(p) != this_cpu))
+       if (unlikely(task_cpu(p) != this_cpu)) {
+               rcu_read_lock();
                __set_task_cpu(p, this_cpu);
+               rcu_read_unlock();
+       }
 
        update_curr(cfs_rq);
 
index 83c66e8ad3ee314704456e14dfc23607d00c5f0d..185f920ec1a2e923b0d966f787c610ff26a7b6cb 100644 (file)
@@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
  * release the lock. Decreases scheduling overhead.
  */
 SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)
index d10c80ebb67a2821038a9c59b912bf8e323d67e3..bea7d79f7e9ca958bba514cbd8eb48ceab47bab3 100644 (file)
@@ -609,7 +609,7 @@ static void update_curr_rt(struct rq *rq)
        if (!task_has_rt_policy(curr))
                return;
 
-       delta_exec = rq->clock - curr->se.exec_start;
+       delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
 
@@ -618,7 +618,7 @@ static void update_curr_rt(struct rq *rq)
        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);
 
-       curr->se.exec_start = rq->clock;
+       curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);
 
        sched_rt_avg_update(rq, delta_exec);
@@ -960,18 +960,19 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
-        * We want to avoid overloading runqueues. Even if
-        * the RT task is of higher priority than the current RT task.
-        * RT tasks behave differently than other tasks. If
-        * one gets preempted, we try to push it off to another queue.
-        * So trying to keep a preempting RT task on the same
-        * cache hot CPU will force the running RT task to
-        * a cold CPU. So we waste all the cache for the lower
-        * RT task in hopes of saving some of a RT task
-        * that is just being woken and probably will have
-        * cold cache anyway.
+        * We want to avoid overloading runqueues. If the woken
+        * task has a higher priority, then it will stay on this CPU
+        * and the lower prio task should be moved to another CPU.
+        * Even though this will probably make the lower prio task
+        * lose its cache, we do not want to bounce a higher prio
+        * task around just because it gave up its CPU, perhaps for
+        * a lock?
+        *
+        * For equal prio tasks, we just let the scheduler sort it out.
         */
        if (unlikely(rt_task(rq->curr)) &&
+           (rq->curr->rt.nr_cpus_allowed < 2 ||
+            rq->curr->prio < p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);
 
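
Read as a predicate, the new wakeup condition above pushes the woken task elsewhere only when the current task is RT and is either pinned (fewer than two allowed CPUs) or higher priority (a lower prio value). A hedged stand-alone model:

#include <stdio.h>

struct t { int nr_cpus_allowed; int prio; };

/* Model of the new select_task_rq_rt() condition (lower prio value =
 * higher priority). */
static int push_away(const struct t *curr, const struct t *p, int curr_is_rt)
{
	return curr_is_rt &&
	       (curr->nr_cpus_allowed < 2 || curr->prio < p->prio) &&
	       p->nr_cpus_allowed > 1;
}

int main(void)
{
	struct t curr  = { .nr_cpus_allowed = 4, .prio = 10 };
	struct t woken = { .nr_cpus_allowed = 4, .prio = 20 };

	/* current is higher priority: push the woken task away */
	printf("push=%d\n", push_away(&curr, &woken, 1));

	/* equal priority: let the scheduler sort it out here */
	woken.prio = 10;
	printf("push=%d\n", push_away(&curr, &woken, 1));
	return 0;	/* prints 1, then 0 */
}
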
@@ -1074,7 +1075,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        } while (rt_rq);
 
        p = rt_task_of(rt_se);
-       p->se.exec_start = rq->clock;
+       p->se.exec_start = rq->clock_task;
 
        return p;
 }
@@ -1139,7 +1140,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
- next_idx:
+next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
@@ -1315,7 +1316,7 @@ static int push_rt_task(struct rq *rq)
        if (!next_task)
                return 0;
 
- retry:
+retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
@@ -1463,7 +1464,7 @@ static int pull_rt_task(struct rq *this_rq)
                         * but possible)
                         */
                }
- skip:
+skip:
                double_unlock_balance(this_rq, src_rq);
        }
 
@@ -1491,7 +1492,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1)
+           p->rt.nr_cpus_allowed > 1 &&
+           rt_task(rq->curr) &&
+           (rq->curr->rt.nr_cpus_allowed < 2 ||
+            rq->curr->prio < p->prio))
                push_rt_tasks(rq);
 }
 
@@ -1709,7 +1713,7 @@ static void set_curr_task_rt(struct rq *rq)
 {
        struct task_struct *p = rq->curr;
 
-       p->se.exec_start = rq->clock;
+       p->se.exec_start = rq->clock_task;
 
        /* The running task is never eligible for pushing */
        dequeue_pushable_task(rq, p);
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
new file mode 100644 (file)
index 0000000..45bddc0
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * stop-task scheduling class.
+ *
+ * The stop task is the highest priority task in the system; it preempts
+ * everything and will be preempted by nothing.
+ *
+ * See kernel/stop_machine.c
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_stop(struct rq *rq, struct task_struct *p,
+                   int sd_flag, int flags)
+{
+       return task_cpu(p); /* stop tasks never migrate */
+}
+#endif /* CONFIG_SMP */
+
+static void
+check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+       resched_task(rq->curr); /* we preempt everything */
+}
+
+static struct task_struct *pick_next_task_stop(struct rq *rq)
+{
+       struct task_struct *stop = rq->stop;
+
+       if (stop && stop->state == TASK_RUNNING)
+               return stop;
+
+       return NULL;
+}
+
+static void
+enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+}
+
+static void
+dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+}
+
+static void yield_task_stop(struct rq *rq)
+{
+       BUG(); /* the stop task should never yield; it's pointless. */
+}
+
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_stop(struct rq *rq)
+{
+}
+
+static void switched_to_stop(struct rq *rq, struct task_struct *p,
+                            int running)
+{
+       BUG(); /* it's impossible to change to this class */
+}
+
+static void prio_changed_stop(struct rq *rq, struct task_struct *p,
+                             int oldprio, int running)
+{
+       BUG(); /* how!? what priority? */
+}
+
+static unsigned int
+get_rr_interval_stop(struct rq *rq, struct task_struct *task)
+{
+       return 0;
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU stop tasks:
+ */
+static const struct sched_class stop_sched_class = {
+       .next                   = &rt_sched_class,
+
+       .enqueue_task           = enqueue_task_stop,
+       .dequeue_task           = dequeue_task_stop,
+       .yield_task             = yield_task_stop,
+
+       .check_preempt_curr     = check_preempt_curr_stop,
+
+       .pick_next_task         = pick_next_task_stop,
+       .put_prev_task          = put_prev_task_stop,
+
+#ifdef CONFIG_SMP
+       .select_task_rq         = select_task_rq_stop,
+#endif
+
+       .set_curr_task          = set_curr_task_stop,
+       .task_tick              = task_tick_stop,
+
+       .get_rr_interval        = get_rr_interval_stop,
+
+       .prio_changed           = prio_changed_stop,
+       .switched_to            = switched_to_stop,
+
+       /* no .task_new for stop tasks */
+};
index bded65187780f5f288bd779920a5c04c190528dc..919562c3d6b720d58ff246b2c412114d77c0b419 100644 (file)
@@ -2214,6 +2214,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
                err |= __put_user(from->si_addr, &to->si_addr);
 #ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
+#endif
+#ifdef BUS_MCEERR_AO
+               /*
+                * Other callers might not initialize the si_lsb field,
+                * so check explicitly for the right codes here.
+                */
+               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+                       err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
                break;
        case __SI_CHLD:
index 75c970c715d399f1385d72e92e0c47c509e568dc..ed6aacfcb7efb307fe313ea798e7074f2c8f4f92 100644 (file)
@@ -365,9 +365,10 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
 {
-       csd_lock(data);
+       unsigned int this_cpu;
+       unsigned long flags;
 
+       this_cpu = get_cpu();
        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);
 
-       generic_exec_single(cpu, data, wait);
+       if (cpu == this_cpu) {
+               local_irq_save(flags);
+               data->func(data->info);
+               local_irq_restore(flags);
+       } else {
+               csd_lock(data);
+               generic_exec_single(cpu, data, wait);
+       }
+       put_cpu();
 }
 
 /**
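
The shape of the fix above, as a hedged user-space analogue (invented names): when the target CPU is the caller's own, run the callback directly with interrupts masked instead of queuing work for an IPI the CPU cannot service while it waits.

#include <stdio.h>

static int this_cpu = 0;

/* Stand-in for csd_lock() + generic_exec_single() on a remote CPU. */
static void remote_exec(int cpu, void (*func)(void *), void *info)
{
	printf("IPI to cpu%d\n", cpu);
	func(info);
}

static void call_on_cpu(int cpu, void (*func)(void *), void *info)
{
	if (cpu == this_cpu)
		func(info);	/* run inline, as the fix does */
	else
		remote_exec(cpu, func, info);
}

static void hello(void *info)
{
	printf("hello from %s\n", (const char *)info);
}

int main(void)
{
	call_on_cpu(0, hello, "local");
	call_on_cpu(1, hello, "remote");
	return 0;
}
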
index 07b4f1b1a73a9b6a309a3e7fe249c813007b5d17..79ee8f1fc0e71a343cf7de924c8ae5fa69b554b3 100644 (file)
@@ -76,12 +76,22 @@ void wakeup_softirqd(void)
                wake_up_process(tsk);
 }
 
+/*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+ *   softirq processing.
+ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ *   on local_bh_disable or local_bh_enable.
+ * This lets us distinguish whether we are currently processing a
+ * softirq from merely having bh disabled.
+ */
+
 /*
  * This one is for softirq.c-internal use,
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip)
+static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
        unsigned long flags;
 
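
A stand-alone model of the counting trick described in the comment above (the offset values mirror the kernel's convention, but the preempt_count here is a plain variable): serving a softirq adds SOFTIRQ_OFFSET, local_bh_disable() adds twice that, so the low softirq bit alone identifies actual softirq processing.

#include <stdio.h>

#define SOFTIRQ_OFFSET		(1U << 8)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

static unsigned int preempt_count;

static int in_serving_softirq(void)
{
	return !!(preempt_count & SOFTIRQ_OFFSET);
}

int main(void)
{
	preempt_count += SOFTIRQ_DISABLE_OFFSET;	/* local_bh_disable() */
	printf("bh disabled, serving? %d\n", in_serving_softirq());

	preempt_count += SOFTIRQ_OFFSET;		/* enter __do_softirq() */
	printf("in softirq,  serving? %d\n", in_serving_softirq());
	return 0;	/* prints 0, then 1 */
}
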
@@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip)
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       preempt_count() += SOFTIRQ_OFFSET;
+       preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 
-       if (preempt_count() == SOFTIRQ_OFFSET)
+       if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip)
+static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-       add_preempt_count(SOFTIRQ_OFFSET);
+       add_preempt_count(cnt);
        barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 void local_bh_disable(void)
 {
-       __local_bh_disable((unsigned long)__builtin_return_address(0));
+       __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(local_bh_disable);
 
+static void __local_bh_enable(unsigned int cnt)
+{
+       WARN_ON_ONCE(in_irq());
+       WARN_ON_ONCE(!irqs_disabled());
+
+       if (softirq_count() == cnt)
+               trace_softirqs_on((unsigned long)__builtin_return_address(0));
+       sub_preempt_count(cnt);
+}
+
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
@@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable);
  */
 void _local_bh_enable(void)
 {
-       WARN_ON_ONCE(in_irq());
-       WARN_ON_ONCE(!irqs_disabled());
-
-       if (softirq_count() == SOFTIRQ_OFFSET)
-               trace_softirqs_on((unsigned long)__builtin_return_address(0));
-       sub_preempt_count(SOFTIRQ_OFFSET);
+       __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(_local_bh_enable);
@@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
        /*
         * Are softirqs going to be turned on now:
         */
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       sub_preempt_count(SOFTIRQ_OFFSET - 1);
+       sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();
@@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void)
        pending = local_softirq_pending();
        account_system_vtime(current);
 
-       __local_bh_disable((unsigned long)__builtin_return_address(0));
+       __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
        lockdep_softirq_enter();
 
        cpu = smp_processor_id();
@@ -245,7 +262,7 @@ restart:
        lockdep_softirq_exit();
 
        account_system_vtime(current);
-       _local_bh_enable();
+       __local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -279,10 +296,16 @@ void irq_enter(void)
 
        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
-               __irq_enter();
+               /*
+                * Prevent raise_softirq from needlessly waking up ksoftirqd
+                * here, as softirq will be serviced on return from interrupt.
+                */
+               local_bh_disable();
                tick_check_idle(cpu);
-       } else
-               __irq_enter();
+               _local_bh_enable();
+       }
+
+       __irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -696,6 +719,7 @@ static int run_ksoftirqd(void * __bind_cpu)
 {
        set_current_state(TASK_INTERRUPTIBLE);
 
+       current->flags |= PF_KSOFTIRQD;
        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
index 2980da3fd50925f7902a5ba42e64934f6c4b0650..c71e075005368eceff3aab4340f94beca4aee249 100644 (file)
@@ -46,11 +46,9 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
 {
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
        return init_srcu_struct_fields(sp);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
index 4372ccb25127ea8548719c78cb4a610edb94febf..090c28812ce101fbd055821fe12806265bd1ed9b 100644 (file)
@@ -287,11 +287,12 @@ repeat:
        goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                                           unsigned long action, void *hcpu)
 {
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct task_struct *p;
@@ -304,13 +305,13 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                                   cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                get_task_struct(p);
+               kthread_bind(p, cpu);
+               sched_set_stop_task(cpu, p);
                stopper->thread = p;
                break;
 
        case CPU_ONLINE:
-               kthread_bind(stopper->thread, cpu);
                /* strictly unnecessary, as first user will wake it */
                wake_up_process(stopper->thread);
                /* mark enabled */
@@ -325,6 +326,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
        {
                struct cpu_stop_work *work;
 
+               sched_set_stop_task(cpu, NULL);
                /* kill the stopper */
                kthread_stop(stopper->thread);
                /* drain remaining works */
index e9ad4448982860af9919df53c3368156a4bf2445..7f5a0cd296a96ca44e43f0db028026094dbbb57a 100644 (file)
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
+       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        return err;
 }
 
index ca38e8e3e907557f74faaad7ddb57d330bd43d2d..3a45c224770fb82fa4bd76f9c7d4f2f989ee5aa9 100644 (file)
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
        sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-       {
-               int err;
-               err = sysctl_check_table(current->nsproxy, root_table);
-       }
+       sysctl_check_table(current->nsproxy, root_table);
 #endif
        return 0;
 }
@@ -2488,7 +2485,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                kbuf[left] = 0;
        }
 
-       for (; left && vleft--; i++, min++, max++, first=0) {
+       for (; left && vleft--; i++, first = 0) {
                unsigned long val;
 
                if (write) {
index 04cdcf72c827e7601cdca63ab4c54a3f16473c50..10b90d8a03c48678258c6aaf3de353af7b06ed36 100644 (file)
@@ -143,15 +143,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
                                if (!table->maxlen)
                                        set_fail(&fail, table, "No maxlen");
                        }
-                       if ((table->proc_handler == proc_doulongvec_minmax) ||
-                           (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
-                               if (table->maxlen > sizeof (unsigned long)) {
-                                       if (!table->extra1)
-                                               set_fail(&fail, table, "No min");
-                                       if (!table->extra2)
-                                               set_fail(&fail, table, "No max");
-                               }
-                       }
 #ifdef CONFIG_PROC_SYSCTL
                        if (table->procname && !table->proc_handler)
                                set_fail(&fail, table, "No proc_handler");
index 4f104515a19bcb18ca73226c745786a41d41b4ae..f8b11a283171b65849c5ed3bc0e162397f76fbea 100644 (file)
@@ -115,7 +115,9 @@ static int test_kprobes(void)
        int ret;
        struct kprobe *kps[2] = {&kp, &kp2};
 
-       kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       kp.addr = NULL;
+       kp.flags = 0;
        ret = register_kprobes(kps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
@@ -210,7 +212,9 @@ static int test_jprobes(void)
        int ret;
        struct jprobe *jps[2] = {&jp, &jp2};
 
-       jp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       jp.kp.addr = NULL;
+       jp.kp.flags = 0;
        ret = register_jprobes(jps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
@@ -323,7 +327,9 @@ static int test_kretprobes(void)
        int ret;
        struct kretprobe *rps[2] = {&rp, &rp2};
 
-       rp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+       /* addr and flags should be cleared for reusing kprobe. */
+       rp.kp.addr = NULL;
+       rp.kp.flags = 0;
        ret = register_kretprobes(rps, 2);
        if (ret < 0) {
                printk(KERN_ERR "Kprobe smoke test failed: "
index 97bf05baade7cb4b9db4a5b76cf26255b6a67753..68a9ae7679b717f6eb4782ce5114ad405dec5a4c 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
@@ -1279,7 +1279,10 @@ void update_process_times(int user_tick)
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
-       perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+       if (in_irq())
+               irq_work_run();
+#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
 }
index 538501c6ea5058cf703eaa2608307f03f3aee89a..e550d2eda1dfdbf843e60c170d1fff3ec4e4eaf9 100644 (file)
@@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS
        help
          See Documentation/trace/ftrace-design.txt
 
+config HAVE_C_RECORDMCOUNT
+       bool
+       help
+         C version of recordmcount available?
+
 config TRACER_MAX_TRACE
        bool
 
index 0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543..ebd80d50c474e60fd84fd4a577da69b052fe14eb 100644 (file)
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
 #endif
+       mutex_lock(&ftrace_profile_lock);
+
+       /* we raced with function_profile_reset() */
+       if (unlikely(rec->counter == 0)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                do_div(stddev, (rec->counter - 1) * 1000);
        }
 
-       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
-       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
+out:
+       mutex_unlock(&ftrace_profile_lock);
 
-       return 0;
+       return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -877,10 +884,8 @@ enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
-       FTRACE_ENABLE_MCOUNT            = (1 << 3),
-       FTRACE_DISABLE_MCOUNT           = (1 << 4),
-       FTRACE_START_FUNC_RET           = (1 << 5),
-       FTRACE_STOP_FUNC_RET            = (1 << 6),
+       FTRACE_START_FUNC_RET           = (1 << 3),
+       FTRACE_STOP_FUNC_RET            = (1 << 4),
 };
 
 static int ftrace_filtered;
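With the MCOUNT pseudo-commands gone, callers now compose only the bits that do
real work. Schematically (the condition is hypothetical; the composition follows
the surrounding ftrace_startup/ftrace_shutdown code):

    int command = FTRACE_ENABLE_CALLS;

    if (enable_graph_return)                /* hypothetical condition */
            command |= FTRACE_START_FUNC_RET;

    ftrace_run_update_code(command);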
@@ -1219,8 +1224,6 @@ static void ftrace_shutdown(int command)
 
 static void ftrace_startup_sysctl(void)
 {
-       int command = FTRACE_ENABLE_MCOUNT;
-
        if (unlikely(ftrace_disabled))
                return;
 
@@ -1228,23 +1231,17 @@ static void ftrace_startup_sysctl(void)
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
-               command |= FTRACE_ENABLE_CALLS;
-
-       ftrace_run_update_code(command);
+               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
-       int command = FTRACE_DISABLE_MCOUNT;
-
        if (unlikely(ftrace_disabled))
                return;
 
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
-
-       ftrace_run_update_code(command);
+               ftrace_run_update_code(FTRACE_DISABLE_CALLS);
 }
 
 static cycle_t         ftrace_update_time;
@@ -1361,24 +1358,29 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-       struct ftrace_page      *pg;
-       int                     hidx;
-       int                     idx;
-       unsigned                flags;
-       struct trace_parser     parser;
+       loff_t                          pos;
+       loff_t                          func_pos;
+       struct ftrace_page              *pg;
+       struct dyn_ftrace               *func;
+       struct ftrace_func_probe        *probe;
+       struct trace_parser             parser;
+       int                             hidx;
+       int                             idx;
+       unsigned                        flags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+t_hash_next(struct seq_file *m, loff_t *pos)
 {
        struct ftrace_iterator *iter = m->private;
-       struct hlist_node *hnd = v;
+       struct hlist_node *hnd = NULL;
        struct hlist_head *hhd;
 
-       WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
-
        (*pos)++;
+       iter->pos = *pos;
 
+       if (iter->probe)
+               hnd = &iter->probe->node;
  retry:
        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
                return NULL;
@@ -1401,7 +1403,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos)
                }
        }
 
-       return hnd;
+       if (WARN_ON_ONCE(!hnd))
+               return NULL;
+
+       iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+       return iter;
 }
 
 static void *t_hash_start(struct seq_file *m, loff_t *pos)
@@ -1410,26 +1417,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
        void *p = NULL;
        loff_t l;
 
-       if (!(iter->flags & FTRACE_ITER_HASH))
-               *pos = 0;
-
-       iter->flags |= FTRACE_ITER_HASH;
+       if (iter->func_pos > *pos)
+               return NULL;
 
        iter->hidx = 0;
-       for (l = 0; l <= *pos; ) {
-               p = t_hash_next(m, p, &l);
+       for (l = 0; l <= (*pos - iter->func_pos); ) {
+               p = t_hash_next(m, &l);
                if (!p)
                        break;
        }
-       return p;
+       if (!p)
+               return NULL;
+
+       /* Only set this if we have an item */
+       iter->flags |= FTRACE_ITER_HASH;
+
+       return iter;
 }
 
-static int t_hash_show(struct seq_file *m, void *v)
+static int
+t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
 {
        struct ftrace_func_probe *rec;
-       struct hlist_node *hnd = v;
 
-       rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+       rec = iter->probe;
+       if (WARN_ON_ONCE(!rec))
+               return -EIO;
 
        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1450,12 +1463,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
        struct dyn_ftrace *rec = NULL;
 
        if (iter->flags & FTRACE_ITER_HASH)
-               return t_hash_next(m, v, pos);
+               return t_hash_next(m, pos);
 
        (*pos)++;
+       iter->pos = *pos;
 
        if (iter->flags & FTRACE_ITER_PRINTALL)
-               return NULL;
+               return t_hash_start(m, pos);
 
  retry:
        if (iter->idx >= iter->pg->index) {
@@ -1484,7 +1498,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                }
        }
 
-       return rec;
+       if (!rec)
+               return t_hash_start(m, pos);
+
+       iter->func_pos = *pos;
+       iter->func = rec;
+
+       return iter;
+}
+
+static void reset_iter_read(struct ftrace_iterator *iter)
+{
+       iter->pos = 0;
+       iter->func_pos = 0;
+       iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
@@ -1494,6 +1521,12 @@ static void *t_start(struct seq_file *m, loff_t *pos)
        loff_t l;
 
        mutex_lock(&ftrace_lock);
+       /*
+        * If an lseek was done, then reset and start from beginning.
+        */
+       if (*pos < iter->pos)
+               reset_iter_read(iter);
+
        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
@@ -1503,12 +1536,19 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
+               /* reset in case of seek/pread */
+               iter->flags &= ~FTRACE_ITER_HASH;
                return iter;
        }
 
        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);
 
+       /*
+        * Unfortunately, we need to restart at ftrace_pages_start
+        * every time we let go of ftrace_lock. This is because
+        * those pointers can change without the lock.
+        */
        iter->pg = ftrace_pages_start;
        iter->idx = 0;
        for (l = 0; l <= *pos; ) {
@@ -1517,10 +1557,14 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                        break;
        }
 
-       if (!p && iter->flags & FTRACE_ITER_FILTER)
-               return t_hash_start(m, pos);
+       if (!p) {
+               if (iter->flags & FTRACE_ITER_FILTER)
+                       return t_hash_start(m, pos);
 
-       return p;
+               return NULL;
+       }
+
+       return iter;
 }
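These callbacks plug into the seq_file machinery in the usual way; a sketch of
the wiring (the struct name is not shown in these hunks):

    static const struct seq_operations show_ftrace_seq_ops = {
            .start = t_start,       /* may rescan after an lseek/pread */
            .next  = t_next,        /* advances *pos, falls through to the hash */
            .stop  = t_stop,
            .show  = t_show,
    };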
 
 static void t_stop(struct seq_file *m, void *p)
@@ -1531,16 +1575,18 @@ static void t_stop(struct seq_file *m, void *p)
 static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
-       struct dyn_ftrace *rec = v;
+       struct dyn_ftrace *rec;
 
        if (iter->flags & FTRACE_ITER_HASH)
-               return t_hash_show(m, v);
+               return t_hash_show(m, iter);
 
        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }
 
+       rec = iter->func;
+
        if (!rec)
                return 0;
 
@@ -1592,8 +1638,8 @@ ftrace_failures_open(struct inode *inode, struct file *file)
 
        ret = ftrace_avail_open(inode, file);
        if (!ret) {
-               m = (struct seq_file *)file->private_data;
-               iter = (struct ftrace_iterator *)m->private;
+               m = file->private_data;
+               iter = m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }
 
index 19cccc3c302871beae5fd39ad937b0791a2e785d..c5a632a669e1f7a4ebc6a07889ef593265b3d76a 100644 (file)
@@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta)
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
 /* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
 
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
@@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
+/*
+ * The total number of entries in the ring buffer is the running
+ * count of entries written into it, minus the sum of the entries
+ * read from it and the number of entries that were overwritten.
+ */
+static inline unsigned long
+rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+{
+       return local_read(&cpu_buffer->entries) -
+               (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+}
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
@@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned long ret;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
 
        cpu_buffer = buffer->buffers[cpu];
-       ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
-               - cpu_buffer->read;
 
-       return ret;
+       return rb_num_of_entries(cpu_buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
-               entries += (local_read(&cpu_buffer->entries) -
-                           local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+               entries += rb_num_of_entries(cpu_buffer);
        }
 
        return entries;
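Worked example with hypothetical per-cpu counters:

    /*
     * entries = 1000, overrun = 100, read = 250 (hypothetical)
     * rb_num_of_entries() returns 1000 - (100 + 250) = 650
     */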
@@ -2985,13 +2994,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
index 9ec59f541156625b5c4b0aea9267086c928ae07a..001bcd2ccf4afb5170cb06d500bcff90844bf90e 100644 (file)
@@ -2196,7 +2196,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 
 static int tracing_release(struct inode *inode, struct file *file)
 {
-       struct seq_file *m = (struct seq_file *)file->private_data;
+       struct seq_file *m = file->private_data;
        struct trace_iterator *iter;
        int cpu;
 
index d39b3c5454a5e684b8c720e0791894b36529f1cf..9021f8c0c0c3e379edbd8f39770bd9345794e266 100644 (file)
@@ -343,6 +343,10 @@ void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
+void trace_graph_function(struct trace_array *tr,
+                   unsigned long ip,
+                   unsigned long parent_ip,
+                   unsigned long flags, int pc);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 int trace_empty(struct trace_iterator *iter);
index 000e6e85b445906893d7003b2f28c615453bb726..39c059ca670e64156e6681782ffa708c6b8d720f 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -24,7 +24,7 @@ static int    total_ref_count;
 static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
-       struct hlist_head *list;
+       struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;
 
@@ -42,11 +42,11 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
        tp_event->perf_events = list;
 
        if (!total_ref_count) {
-               char *buf;
+               char __percpu *buf;
                int i;
 
-               for (i = 0; i < 4; i++) {
-                       buf = (char *)alloc_percpu(perf_trace_t);
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+                       buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;
 
@@ -65,7 +65,7 @@ fail:
        if (!total_ref_count) {
                int i;
 
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
+                       if (ret)
+                               module_put(tp_event->mod);
                        break;
                }
        }
@@ -99,22 +101,26 @@ int perf_trace_init(struct perf_event *p_event)
        return ret;
 }
 
-int perf_trace_enable(struct perf_event *p_event)
+int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;
 
-       list = tp_event->perf_events;
-       if (WARN_ON_ONCE(!list))
+       pcpu_list = tp_event->perf_events;
+       if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;
 
-       list = this_cpu_ptr(list);
+       if (!(flags & PERF_EF_START))
+               p_event->hw.state = PERF_HES_STOPPED;
+
+       list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);
 
        return 0;
 }
 
-void perf_trace_disable(struct perf_event *p_event)
+void perf_trace_del(struct perf_event *p_event, int flags)
 {
        hlist_del_rcu(&p_event->hlist_entry);
 }
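The __percpu annotations above follow the standard per-cpu allocation pattern;
a minimal sketch with hypothetical names:

    struct hlist_head __percpu *heads;
    int cpu;

    heads = alloc_percpu(struct hlist_head);
    if (!heads)
            return -ENOMEM;

    for_each_possible_cpu(cpu)
            INIT_HLIST_HEAD(per_cpu_ptr(heads, cpu));

    /* the hot path touches only the local CPU's head */
    hlist_add_head_rcu(&p_event->hlist_entry, this_cpu_ptr(heads));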
@@ -140,12 +146,13 @@ void perf_trace_destroy(struct perf_event *p_event)
        tp_event->perf_events = NULL;
 
        if (!--total_ref_count) {
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
 out:
+       module_put(tp_event->mod);
        mutex_unlock(&event_mutex);
 }
 
index 4c758f146328f18ce82a318fb60a0413006aca8f..398c0e8b332c1840e16bc0599c1230f9594d29a2 100644 (file)
@@ -600,21 +600,29 @@ out:
 
 enum {
        FORMAT_HEADER           = 1,
-       FORMAT_PRINTFMT         = 2,
+       FORMAT_FIELD_SEPERATOR  = 2,
+       FORMAT_PRINTFMT         = 3,
 };
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
-       struct list_head *head;
+       struct list_head *common_head = &ftrace_common_fields;
+       struct list_head *head = trace_get_fields(call);
 
        (*pos)++;
 
        switch ((unsigned long)v) {
        case FORMAT_HEADER:
-               head = &ftrace_common_fields;
+               if (unlikely(list_empty(common_head)))
+                       return NULL;
+
+               field = list_entry(common_head->prev,
+                                  struct ftrace_event_field, link);
+               return field;
 
+       case FORMAT_FIELD_SEPERATOR:
                if (unlikely(list_empty(head)))
                        return NULL;
 
@@ -626,31 +634,10 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
                return NULL;
        }
 
-       head = trace_get_fields(call);
-
-       /*
-        * To separate common fields from event fields, the
-        * LSB is set on the first event field. Clear it in case.
-        */
-       v = (void *)((unsigned long)v & ~1L);
-
        field = v;
-       /*
-        * If this is a common field, and at the end of the list, then
-        * continue with main list.
-        */
-       if (field->link.prev == &ftrace_common_fields) {
-               if (unlikely(list_empty(head)))
-                       return NULL;
-               field = list_entry(head->prev, struct ftrace_event_field, link);
-               /* Set the LSB to notify f_show to print an extra newline */
-               field = (struct ftrace_event_field *)
-                       ((unsigned long)field | 1);
-               return field;
-       }
-
-       /* If we are done tell f_show to print the format */
-       if (field->link.prev == head)
+       if (field->link.prev == common_head)
+               return (void *)FORMAT_FIELD_SEPERATOR;
+       else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;
 
        field = list_entry(field->link.prev, struct ftrace_event_field, link);
@@ -688,22 +675,16 @@ static int f_show(struct seq_file *m, void *v)
                seq_printf(m, "format:\n");
                return 0;
 
+       case FORMAT_FIELD_SEPERATOR:
+               seq_putc(m, '\n');
+               return 0;
+
        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }
 
-       /*
-        * To separate common fields from event fields, the
-        * LSB is set on the first event field. Clear it and
-        * print a newline if it is set.
-        */
-       if ((unsigned long)v & 1) {
-               seq_putc(m, '\n');
-               v = (void *)((unsigned long)v & ~1L);
-       }
-
        field = v;
 
        /*
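With the separator in place, reading an event's format file yields the common
fields, a blank line, the event fields, and the print format; schematically
(field lines abridged and hypothetical):

    format:
            field:unsigned short common_type;  offset:0;  size:2;  signed:0;
            field:int common_pid;              offset:4;  size:4;  signed:1;

            field:unsigned long ip;            offset:8;  size:8;  signed:0;

    print fmt: "ip=%lx", REC->ip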
index 6f233698518ede15cc9302e889de9f108aa0f1cb..76b05980225cb79df8bfad7bb35cd64bd52bcaf2 100644 (file)
 #include "trace.h"
 #include "trace_output.h"
 
+/* When set, irq functions will be ignored */
+static int ftrace_graph_skip_irqs;
+
 struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
+       int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
-       struct fgraph_cpu_data          *cpu_data;
+       struct fgraph_cpu_data __percpu *cpu_data;
 
        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
@@ -41,6 +45,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_PROC         0x8
 #define TRACE_GRAPH_PRINT_DURATION     0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME     0x20
+#define TRACE_GRAPH_PRINT_IRQS         0x40
 
 static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
@@ -55,13 +60,15 @@ static struct tracer_opt trace_opts[] = {
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+       /* Display interrupts */
+       { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        { } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
-              TRACE_GRAPH_PRINT_DURATION,
+              TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
        .opts = trace_opts
 };
 
@@ -204,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr,
        return 1;
 }
 
+static inline int ftrace_graph_ignore_irqs(void)
+{
+       if (!ftrace_graph_skip_irqs)
+               return 0;
+
+       return in_irq();
+}
+
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
        struct trace_array *tr = graph_array;
@@ -218,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
                return 0;
 
        /* trace it when it is-nested-in or is a function enabled. */
-       if (!(trace->depth || ftrace_graph_addr(trace->func)))
+       if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
+             ftrace_graph_ignore_irqs())
                return 0;
 
        local_irq_save(flags);
@@ -246,6 +262,34 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
                return trace_graph_entry(trace);
 }
 
+static void
+__trace_graph_function(struct trace_array *tr,
+               unsigned long ip, unsigned long flags, int pc)
+{
+       u64 time = trace_clock_local();
+       struct ftrace_graph_ent ent = {
+               .func  = ip,
+               .depth = 0,
+       };
+       struct ftrace_graph_ret ret = {
+               .func     = ip,
+               .depth    = 0,
+               .calltime = time,
+               .rettime  = time,
+       };
+
+       __trace_graph_entry(tr, &ent, flags, pc);
+       __trace_graph_return(tr, &ret, flags, pc);
+}
+
+void
+trace_graph_function(struct trace_array *tr,
+               unsigned long ip, unsigned long parent_ip,
+               unsigned long flags, int pc)
+{
+       __trace_graph_function(tr, ip, flags, pc);
+}
+
 void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
@@ -649,8 +693,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
-               snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
-                        nsecs_rem);
+               size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
+
+               snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
@@ -855,6 +900,108 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
        return 0;
 }
 
+/*
+ * Entry check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just entered irq code
+ *
+ * returns 0 if
+ *  - the funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_entry(struct trace_iterator *iter, u32 flags,
+               unsigned long addr, int depth)
+{
+       int cpu = iter->cpu;
+       int *depth_irq;
+       struct fgraph_data *data = iter->private;
+
+       /*
+        * If we are either displaying irqs, or we got called as
+        * a graph event and private data does not exist,
+        * then we bypass the irq check.
+        */
+       if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+           (!data))
+               return 0;
+
+       depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+       /*
+        * We are inside the irq code
+        */
+       if (*depth_irq >= 0)
+               return 1;
+
+       if ((addr < (unsigned long)__irqentry_text_start) ||
+           (addr >= (unsigned long)__irqentry_text_end))
+               return 0;
+
+       /*
+        * We are entering irq code.
+        */
+       *depth_irq = depth;
+       return 1;
+}
+
+/*
+ * Return check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just left irq code
+ *
+ * returns 0 if
+ *  - the funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
+{
+       int cpu = iter->cpu;
+       int *depth_irq;
+       struct fgraph_data *data = iter->private;
+
+       /*
+        * If we are either displaying irqs, or we got called as
+        * a graph event and private data does not exist,
+        * then we bypass the irq check.
+        */
+       if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+           (!data))
+               return 0;
+
+       depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+       /*
+        * We are not inside the irq code.
+        */
+       if (*depth_irq == -1)
+               return 0;
+
+       /*
+        * We are inside the irq code, and this is the return of the
+        * entry we recorded.  Don't trace it, and clear the entry
+        * depth, since we are now leaving the irq code.
+        *
+        * Using >= rather than == ensures that we 'leave the irq
+        * code' even if the matching RETURN entry was lost.
+        */
+       if (*depth_irq >= depth) {
+               *depth_irq = -1;
+               return 1;
+       }
+
+       /*
+        * We are inside the irq code, and this is not the entry.
+        */
+       return 1;
+}
+
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
@@ -865,6 +1012,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
        static enum print_line_t ret;
        int cpu = iter->cpu;
 
+       if (check_irq_entry(iter, flags, call->func, call->depth))
+               return TRACE_TYPE_HANDLED;
+
        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
                return TRACE_TYPE_PARTIAL_LINE;
 
@@ -902,6 +1052,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
        int ret;
        int i;
 
+       if (check_irq_return(iter, flags, trace->depth))
+               return TRACE_TYPE_HANDLED;
+
        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;
@@ -1054,7 +1207,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 
 
 enum print_line_t
-print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
@@ -1117,7 +1270,18 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 static enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-       return print_graph_function_flags(iter, tracer_flags.val);
+       return __print_graph_function_flags(iter, tracer_flags.val);
+}
+
+enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
+                                            u32 flags)
+{
+       if (trace_flags & TRACE_ITER_LATENCY_FMT)
+               flags |= TRACE_GRAPH_PRINT_DURATION;
+       else
+               flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+       return __print_graph_function_flags(iter, flags);
 }
 
 static enum print_line_t
@@ -1149,7 +1313,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
        seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
 }
 
-void print_graph_headers_flags(struct seq_file *s, u32 flags)
+static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
 
@@ -1190,6 +1354,23 @@ void print_graph_headers(struct seq_file *s)
        print_graph_headers_flags(s, tracer_flags.val);
 }
 
+void print_graph_headers_flags(struct seq_file *s, u32 flags)
+{
+       struct trace_iterator *iter = s->private;
+
+       if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+               /* print nothing if the buffers are empty */
+               if (trace_empty(iter))
+                       return;
+
+               print_trace_header(s, iter);
+               flags |= TRACE_GRAPH_PRINT_DURATION;
+       } else
+               flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+       __print_graph_headers_flags(s, flags);
+}
+
 void graph_trace_open(struct trace_iterator *iter)
 {
        /* pid and depth on the last trace processed */
@@ -1210,9 +1391,12 @@ void graph_trace_open(struct trace_iterator *iter)
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+               int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
                *pid = -1;
                *depth = 0;
                *ignore = 0;
+               *depth_irq = -1;
        }
 
        iter->private = data;
@@ -1235,6 +1419,14 @@ void graph_trace_close(struct trace_iterator *iter)
        }
 }
 
+static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+{
+       if (bit == TRACE_GRAPH_PRINT_IRQS)
+               ftrace_graph_skip_irqs = !set;
+
+       return 0;
+}
+
 static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
 };
@@ -1261,6 +1453,7 @@ static struct tracer graph_trace __read_mostly = {
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
+       .set_flag       = func_graph_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
 #endif
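At runtime the new flag behaves like any other tracer option; e.g., writing 0
to options/funcgraph-irqs under the tracing debugfs directory hides irq context
while the graph tracer is active (the exact path depends on where debugfs is
mounted).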
index 73a6b0601f2e301c0cd5575cc96f7f0b788853d9..5cf8c602b8804c054bbed65475b4815fd29d5b82 100644 (file)
@@ -87,14 +87,22 @@ static __cacheline_aligned_in_smp   unsigned long max_sequence;
 
 #ifdef CONFIG_FUNCTION_TRACER
 /*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the preempt and irqs off function tracers.
+ *
+ * Returns 1 if it is OK to continue, and data->disabled is
+ *            incremented.
+ *         0 if the trace is to be ignored, and data->disabled
+ *            is kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ *  inside the #ifdef of the function graph tracer below.
+ *  This is OK, since the function graph tracer is
+ *  dependent on the function tracer.
  */
-static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int func_prolog_dec(struct trace_array *tr,
+                          struct trace_array_cpu **data,
+                          unsigned long *flags)
 {
-       struct trace_array *tr = irqsoff_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
        long disabled;
        int cpu;
 
@@ -106,18 +114,38 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
-               return;
+               return 0;
 
-       local_save_flags(flags);
+       local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
-       if (!irqs_disabled_flags(flags))
-               return;
+       if (!irqs_disabled_flags(*flags))
+               return 0;
 
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
+       *data = tr->data[cpu];
+       disabled = atomic_inc_return(&(*data)->disabled);
 
        if (likely(disabled == 1))
-               trace_function(tr, ip, parent_ip, flags, preempt_count());
+               return 1;
+
+       atomic_dec(&(*data)->disabled);
+
+       return 0;
+}
+
+/*
+ * irqsoff uses its own tracer function to keep the overhead down:
+ */
+static void
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = irqsoff_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+
+       if (!func_prolog_dec(tr, &data, &flags))
+               return;
+
+       trace_function(tr, ip, parent_ip, flags, preempt_count());
 
        atomic_dec(&data->disabled);
 }
@@ -155,30 +183,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
-       long disabled;
        int ret;
-       int cpu;
        int pc;
 
-       cpu = raw_smp_processor_id();
-       if (likely(!per_cpu(tracing_cpu, cpu)))
+       if (!func_prolog_dec(tr, &data, &flags))
                return 0;
 
-       local_save_flags(flags);
-       /* slight chance to get a false positive on tracing_cpu */
-       if (!irqs_disabled_flags(flags))
-               return 0;
-
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
-               ret = __trace_graph_entry(tr, trace, flags, pc);
-       } else
-               ret = 0;
-
+       pc = preempt_count();
+       ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
+
        return ret;
 }
 
@@ -187,27 +201,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
-       long disabled;
-       int cpu;
        int pc;
 
-       cpu = raw_smp_processor_id();
-       if (likely(!per_cpu(tracing_cpu, cpu)))
+       if (!func_prolog_dec(tr, &data, &flags))
                return;
 
-       local_save_flags(flags);
-       /* slight chance to get a false positive on tracing_cpu */
-       if (!irqs_disabled_flags(flags))
-               return;
-
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
-               __trace_graph_return(tr, trace, flags, pc);
-       }
-
+       pc = preempt_count();
+       __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
 }
 
@@ -229,75 +229,33 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
-       u32 flags = GRAPH_TRACER_FLAGS;
-
-       if (trace_flags & TRACE_ITER_LATENCY_FMT)
-               flags |= TRACE_GRAPH_PRINT_DURATION;
-       else
-               flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
-               return print_graph_function_flags(iter, flags);
+               return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 
        return TRACE_TYPE_UNHANDLED;
 }
 
 static void irqsoff_print_header(struct seq_file *s)
 {
-       if (is_graph()) {
-               struct trace_iterator *iter = s->private;
-               u32 flags = GRAPH_TRACER_FLAGS;
-
-               if (trace_flags & TRACE_ITER_LATENCY_FMT) {
-                       /* print nothing if the buffers are empty */
-                       if (trace_empty(iter))
-                               return;
-
-                       print_trace_header(s, iter);
-                       flags |= TRACE_GRAPH_PRINT_DURATION;
-               } else
-                       flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
-               print_graph_headers_flags(s, flags);
-       } else
+       if (is_graph())
+               print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+       else
                trace_default_header(s);
 }
 
-static void
-trace_graph_function(struct trace_array *tr,
-                unsigned long ip, unsigned long flags, int pc)
-{
-       u64 time = trace_clock_local();
-       struct ftrace_graph_ent ent = {
-               .func  = ip,
-               .depth = 0,
-       };
-       struct ftrace_graph_ret ret = {
-               .func     = ip,
-               .depth    = 0,
-               .calltime = time,
-               .rettime  = time,
-       };
-
-       __trace_graph_entry(tr, &ent, flags, pc);
-       __trace_graph_return(tr, &ret, flags, pc);
-}
-
 static void
 __trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
 {
-       if (!is_graph())
+       if (is_graph())
+               trace_graph_function(tr, ip, parent_ip, flags, pc);
+       else
                trace_function(tr, ip, parent_ip, flags, pc);
-       else {
-               trace_graph_function(tr, parent_ip, flags, pc);
-               trace_graph_function(tr, ip, flags, pc);
-       }
 }
 
 #else
index 8b27c9849b427905ea9a5ef83864a88526afe405..544301d29dee45b0dc089db788bcfe89687bfa45 100644 (file)
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);
 
-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check the name is good for event/group/fields */
+static int is_good_name(const char *name)
 {
        if (!isalpha(*name) && *name != '_')
                return 0;
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        else
                tp->rp.kp.pre_handler = kprobe_dispatcher;
 
-       if (!event || !check_event_name(event)) {
+       if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        if (!tp->call.name)
                goto error;
 
-       if (!group || !check_event_name(group)) {
+       if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
        int i, ret = 0;
        int is_return = 0, is_delete = 0;
        char *symbol = NULL, *event = NULL, *group = NULL;
-       char *arg, *tmp;
+       char *arg;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+               /* Increment count for freeing args in error case */
+               tp->nr_args++;
+
                /* Parse argument name */
                arg = strchr(argv[i], '=');
-               if (arg)
+               if (arg) {
                        *arg++ = '\0';
-               else
+                       tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+               } else {
                        arg = argv[i];
+                       /* If argument name is omitted, set "argN" */
+                       snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+                       tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+               }
 
-               tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
                if (!tp->args[i].name) {
-                       pr_info("Failed to allocate argument%d name '%s'.\n",
-                               i, argv[i]);
+                       pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }
-               tmp = strchr(tp->args[i].name, ':');
-               if (tmp)
-                       *tmp = '_';     /* convert : to _ */
+
+               if (!is_good_name(tp->args[i].name)) {
+                       pr_info("Invalid argument[%d] name: %s\n",
+                               i, tp->args[i].name);
+                       ret = -EINVAL;
+                       goto error;
+               }
 
                if (conflict_field_name(tp->args[i].name, tp->args, i)) {
-                       pr_info("Argument%d name '%s' conflicts with "
+                       pr_info("Argument[%d] name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
                /* Parse fetch argument */
                ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
                if (ret) {
-                       pr_info("Parse error at argument%d. (%d)\n", i, ret);
-                       kfree(tp->args[i].name);
+                       pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
-
-               tp->nr_args++;
        }
 
        ret = register_trace_probe(tp);
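For instance, writing

    p:myprobe do_sys_open dfd=%ax %bx

to the dynamic kprobe_events file would now name the first argument dfd and the
unnamed second one arg2, and both names must pass is_good_name() (the target
symbol and fetch registers here are hypothetical).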
index 4086eae6e81b1c1b9762edc01600d3294c6bc130..7319559ed59f0a10197a15dc9d62a99a1fa7aeb7 100644 (file)
@@ -31,48 +31,98 @@ static int                  wakeup_rt;
 static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static int save_lat_flag;
 
+#define TRACE_DISPLAY_GRAPH     1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* display latency trace as call graph */
+       { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+       { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+       .val  = 0,
+       .opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
 #ifdef CONFIG_FUNCTION_TRACER
+
 /*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the wakeup function tracers.
+ *
+ * Returns 1 if it is OK to continue, and preemption
+ *            is disabled and data->disabled is incremented.
+ *         0 if the trace is to be ignored, and preemption
+ *            is not disabled and data->disabled is
+ *            kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ *  inside the #ifdef of the function graph tracer below.
+ *  This is OK, since the function graph tracer is
+ *  dependent on the function tracer.
  */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int
+func_prolog_preempt_disable(struct trace_array *tr,
+                           struct trace_array_cpu **data,
+                           int *pc)
 {
-       struct trace_array *tr = wakeup_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
        long disabled;
        int cpu;
-       int pc;
 
        if (likely(!wakeup_task))
-               return;
+               return 0;
 
-       pc = preempt_count();
+       *pc = preempt_count();
        preempt_disable_notrace();
 
        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;
 
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
+       *data = tr->data[cpu];
+       disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;
 
-       local_irq_save(flags);
+       return 1;
 
-       trace_function(tr, ip, parent_ip, flags, pc);
+out:
+       atomic_dec(&(*data)->disabled);
+
+out_enable:
+       preempt_enable_notrace();
+       return 0;
+}
 
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = wakeup_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       int pc;
+
+       if (!func_prolog_preempt_disable(tr, &data, &pc))
+               return;
+
+       local_irq_save(flags);
+       trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);
 
- out:
        atomic_dec(&data->disabled);
- out_enable:
        preempt_enable_notrace();
 }
 
@@ -82,6 +132,156 @@ static struct ftrace_ops trace_ops __read_mostly =
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
+static int start_func_tracer(int graph)
+{
+       int ret;
+
+       if (!graph)
+               ret = register_ftrace_function(&trace_ops);
+       else
+               ret = register_ftrace_graph(&wakeup_graph_return,
+                                           &wakeup_graph_entry);
+
+       if (!ret && tracing_is_enabled())
+               tracer_enabled = 1;
+       else
+               tracer_enabled = 0;
+
+       return ret;
+}
+
+static void stop_func_tracer(int graph)
+{
+       tracer_enabled = 0;
+
+       if (!graph)
+               unregister_ftrace_function(&trace_ops);
+       else
+               unregister_ftrace_graph();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+       if (!(bit & TRACE_DISPLAY_GRAPH))
+               return -EINVAL;
+
+       if (!(is_graph() ^ set))
+               return 0;
+
+       stop_func_tracer(!set);
+
+       wakeup_reset(wakeup_trace);
+       tracing_max_latency = 0;
+
+       return start_func_tracer(set);
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+       struct trace_array *tr = wakeup_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       int pc, ret = 0;
+
+       if (!func_prolog_preempt_disable(tr, &data, &pc))
+               return 0;
+
+       local_save_flags(flags);
+       ret = __trace_graph_entry(tr, trace, flags, pc);
+       atomic_dec(&data->disabled);
+       preempt_enable_notrace();
+
+       return ret;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct trace_array *tr = wakeup_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       int pc;
+
+       if (!func_prolog_preempt_disable(tr, &data, &pc))
+               return;
+
+       local_save_flags(flags);
+       __trace_graph_return(tr, trace, flags, pc);
+       atomic_dec(&data->disabled);
+
+       preempt_enable_notrace();
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter)
+{
+       if (is_graph())
+               graph_trace_open(iter);
+}
+
+static void wakeup_trace_close(struct trace_iterator *iter)
+{
+       if (iter->private)
+               graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+       /*
+        * In graph mode call the graph tracer output function,
+        * otherwise go with the TRACE_FN event handler
+        */
+       if (is_graph())
+               return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
+
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_print_header(struct seq_file *s)
+{
+       if (is_graph())
+               print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+       else
+               trace_default_header(s);
+}
+
+static void
+__trace_function(struct trace_array *tr,
+                unsigned long ip, unsigned long parent_ip,
+                unsigned long flags, int pc)
+{
+       if (is_graph())
+               trace_graph_function(tr, ip, parent_ip, flags, pc);
+       else
+               trace_function(tr, ip, parent_ip, flags, pc);
+}
+#else
+#define __trace_function trace_function
+
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+       return -EINVAL;
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+       return -1;
+}
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+       return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
+static void wakeup_print_header(struct seq_file *s) { }
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -152,7 +352,7 @@ probe_wakeup_sched_switch(void *ignore,
        /* The task we are waiting for is waking up */
        data = wakeup_trace->data[wakeup_cpu];
 
-       trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+       __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
        T0 = data->preempt_timestamp;
@@ -252,7 +452,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
         * is not called by an assembly function (whereas schedule is)
         * it should be safe to use it here.
         */
-       trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+       __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
        arch_spin_unlock(&wakeup_lock);
@@ -303,12 +503,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
         */
        smp_wmb();
 
-       register_ftrace_function(&trace_ops);
-
-       if (tracing_is_enabled())
-               tracer_enabled = 1;
-       else
-               tracer_enabled = 0;
+       if (start_func_tracer(is_graph()))
+               printk(KERN_ERR "failed to start wakeup tracer\n");
 
        return;
 fail_deprobe_wake_new:
@@ -320,7 +516,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
        tracer_enabled = 0;
-       unregister_ftrace_function(&trace_ops);
+       stop_func_tracer(is_graph());
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -379,9 +575,15 @@ static struct tracer wakeup_tracer __read_mostly =
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = 1,
+       .print_header   = wakeup_print_header,
+       .print_line     = wakeup_print_line,
+       .flags          = &tracer_flags,
+       .set_flag       = wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
 #endif
+       .open           = wakeup_trace_open,
+       .close          = wakeup_trace_close,
        .use_max_tr     = 1,
 };
 
@@ -394,9 +596,15 @@ static struct tracer wakeup_rt_tracer __read_mostly =
        .stop           = wakeup_tracer_stop,
        .wait_pipe      = poll_wait_pipe,
        .print_max      = 1,
+       .print_header   = wakeup_print_header,
+       .print_line     = wakeup_print_line,
+       .flags          = &tracer_flags,
+       .set_flag       = wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
 #endif
+       .open           = wakeup_trace_open,
+       .close          = wakeup_trace_close,
        .use_max_tr     = 1,
 };
 
index a7cc3793baf6897d2535737a52322552040d2737..209b379a47210dad4170e5914e5ecc68207f8784 100644 (file)
@@ -263,6 +263,11 @@ int __init trace_workqueue_early_init(void)
 {
        int ret, cpu;
 
+       for_each_possible_cpu(cpu) {
+               spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+               INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+       }
+
        ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
        if (ret)
                goto out;
@@ -279,11 +284,6 @@ int __init trace_workqueue_early_init(void)
        if (ret)
                goto no_creation;
 
-       for_each_possible_cpu(cpu) {
-               spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
-               INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
-       }
-
        return 0;
 
 no_creation:
index c77f3eceea250e49b0ff001959f8df9eeb8e0716..e95ee7f31d43309949893d9e63d1703e619a6ca9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/jump_label.h>
 
 extern struct tracepoint __start___tracepoints[];
 extern struct tracepoint __stop___tracepoints[];
@@ -263,7 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry,
         * is used.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-       elem->state = active;
+       if (!elem->state && active) {
+               jump_label_enable(&elem->state);
+               elem->state = active;
+       } else if (elem->state && !active) {
+               jump_label_disable(&elem->state);
+               elem->state = active;
+       }
 }
 
 /*
@@ -277,7 +284,10 @@ static void disable_tracepoint(struct tracepoint *elem)
        if (elem->unregfunc && elem->state)
                elem->unregfunc();
 
-       elem->state = 0;
+       if (elem->state) {
+               jump_label_disable(&elem->state);
+               elem->state = 0;
+       }
        rcu_assign_pointer(elem->funcs, NULL);
 }
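The enable/disable pairing above keys the jump label off the address of the
state word itself; the same pattern in isolation (names hypothetical, assuming
the jump_label API of this series):

    static int my_state;

    static void my_set_active(int active)
    {
            if (!my_state && active)
                    jump_label_enable(&my_state);
            else if (my_state && !active)
                    jump_label_disable(&my_state);
            my_state = active;
    }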
 
index 0d53c8e853b12450cf0c74665d13a22e91a47543..bafba687a6d849a11f0201acd3d7ae5e63e9301e 100644 (file)
@@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __read_mostly did_panic;
 static int __initdata no_watchdog;
 
 
@@ -122,7 +121,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-       __get_cpu_var(watchdog_touch_ts) = 0;
+       __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +141,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       __get_cpu_var(watchdog_nmi_touch) = true;
+       if (watchdog_enabled) {
+               unsigned cpu;
+
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                               per_cpu(watchdog_nmi_touch, cpu) = true;
+               }
+       }
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -180,18 +186,6 @@ static int is_softlockup(unsigned long touch_ts)
        return 0;
 }
 
-static int
-watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
-{
-       did_panic = 1;
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_block = {
-       .notifier_call = watchdog_panic,
-};
-
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
@@ -202,7 +196,7 @@ static struct perf_event_attr wd_hw_attr = {
 };
 
 /* Callback function for perf event subsystem */
-void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
 {
@@ -364,14 +358,14 @@ static int watchdog_nmi_enable(int cpu)
        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
-       event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }
 
        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
-       return -1;
+       return PTR_ERR(event);
 
        /* success path */
 out_save:
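Returning PTR_ERR() propagates the real errno instead of a bare -1; the general
idiom:

    event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
                                             watchdog_overflow_callback);
    if (IS_ERR(event))
            return PTR_ERR(event);  /* e.g. -ENOENT or -EOPNOTSUPP */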
@@ -415,17 +409,19 @@ static int watchdog_prepare_cpu(int cpu)
 static int watchdog_enable(int cpu)
 {
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+       int err;
 
        /* enable the perf event */
-       if (watchdog_nmi_enable(cpu) != 0)
-               return -1;
+       err = watchdog_nmi_enable(cpu);
+       if (err)
+               return err;
 
        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-                       return -1;
+                       return PTR_ERR(p);
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
@@ -433,6 +429,9 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
+       /* if any cpu succeeds, watchdog is considered enabled for the system */
+       watchdog_enabled = 1;
+
        return 0;
 }
 
@@ -455,9 +454,6 @@ static void watchdog_disable(int cpu)
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
-
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
@@ -477,6 +473,9 @@ static void watchdog_disable_all_cpus(void)
 {
        int cpu;
 
+       if (no_watchdog)
+               return;
+
        for_each_online_cpu(cpu)
                watchdog_disable(cpu);
 
@@ -519,17 +518,16 @@ static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        int hotcpu = (unsigned long)hcpu;
+       int err = 0;
 
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (watchdog_prepare_cpu(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               if (watchdog_enable(hotcpu))
-                       return NOTIFY_BAD;
+               err = watchdog_enable(hotcpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
@@ -542,7 +540,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 #endif /* CONFIG_HOTPLUG_CPU */
        }
-       return NOTIFY_OK;
+       return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata cpu_nfb = {
@@ -558,13 +556,11 @@ static int __init spawn_watchdog_task(void)
                return 0;
 
        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-       WARN_ON(err == NOTIFY_BAD);
+       WARN_ON(notifier_to_errno(err));
 
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
 
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
-
        return 0;
 }
 early_initcall(spawn_watchdog_task);
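
The hunk above converts the hotplug callback to the notifier errno helpers.
For reference, a sketch of those helpers as defined in include/linux/notifier.h
around this kernel version (bodies reproduced from memory, so treat the exact
details as an assumption):

    /* Map an errno to a notifier return value, and back. */
    static inline int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
            return NOTIFY_OK;
    }

    static inline int notifier_to_errno(int ret)
    {
            ret &= ~NOTIFY_STOP_MASK;
            return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
    }

With err == 0 this yields NOTIFY_OK, so the WARN_ON(notifier_to_errno(err))
in spawn_watchdog_task() stays silent on success.
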
index 727f24e563aef326b8eba951d2a31a9aa864d32b..f77afd93922968d0c216bee148c0f10ed0ccaa72 100644 (file)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002          Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ *   Derived from the taskqueue/keventd code by:
+ *     David Woodhouse <dwmw2@infradead.org>
+ *     Andrew Morton
+ *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *     Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
+ * Made to use alloc_percpu by Christoph Lameter.
  *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Copyright (C) 2010          SUSE Linux Products GmbH
+ * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * This is the generic async execution mechanism.  Work items are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There is one worker pool for each CPU and
+ * one extra for work items which are better served by workers not
+ * bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
  */
 
 #include <linux/module.h>
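
For orientation, a minimal sketch of the consumer-side API this file
implements; the work function and its trigger below are hypothetical:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Runs later in process context on the shared worker pool. */
    static void example_fn(struct work_struct *work)
    {
            pr_info("deferred work ran\n");
    }
    static DECLARE_WORK(example_work, example_fn);

    /* Safe to call from atomic context, e.g. an interrupt handler. */
    static void kick_example(void)
    {
            schedule_work(&example_work);
    }
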
index 1b4afd2e6ca089de0babdacc5781426ef118da5c..21ac83070a805030150bab1a229be48c3f79485a 100644 (file)
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -482,6 +482,7 @@ config PROVE_LOCKING
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
        select DEBUG_LOCK_ALLOC
+       select TRACE_IRQFLAGS
        default n
        help
         This feature enables the kernel to prove that all locking
@@ -539,6 +540,23 @@ config PROVE_RCU_REPEATEDLY
         disabling, allowing multiple RCU-lockdep warnings to be printed
         on a single reboot.
 
+        Say Y to allow multiple RCU-lockdep warnings per boot.
+
+        Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+       bool "RCU debugging: sparse-based checks for pointer usage"
+       default n
+       help
+        This feature enables the __rcu sparse annotation for
+        RCU-protected pointers.  This annotation will cause sparse
+        to flag any non-RCU use of annotated pointers.  This can be
+        helpful when debugging RCU usage.  Please note that this feature
+        is not intended to enforce code cleanliness; it is instead merely
+        a debugging aid.
+
+        Say Y to make sparse flag questionable use of RCU-protected pointers.
+
         Say N if you are unsure.
 
 config LOCKDEP
@@ -579,11 +597,10 @@ config DEBUG_LOCKDEP
          of more runtime overhead.
 
 config TRACE_IRQFLAGS
-       depends on DEBUG_KERNEL
        bool
-       default y
-       depends on TRACE_IRQFLAGS_SUPPORT
-       depends on PROVE_LOCKING
+       help
+         Enables hooks to interrupt enabling and disabling for
+         either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
        bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -832,6 +849,30 @@ config RCU_CPU_STALL_DETECTOR
 
          Say Y if you are unsure.
 
+config RCU_CPU_STALL_TIMEOUT
+       int "RCU CPU stall timeout in seconds"
+       depends on RCU_CPU_STALL_DETECTOR
+       range 3 300
+       default 60
+       help
+         If a given RCU grace period extends for more than the specified
+         number of seconds, a CPU stall warning is printed.  If the
+         RCU grace period persists, additional CPU stall warnings are
+         printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+       bool "RCU CPU stall checking starts automatically at boot"
+       depends on RCU_CPU_STALL_DETECTOR
+       default y
+       help
+         If set, start checking for RCU CPU stalls immediately on
+         boot.  Otherwise, RCU CPU stall checking must be manually
+         enabled.
+
+         Say Y if you are unsure.
+
+         Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
        bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
        depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
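
To illustrate what the new SPARSE_RCU_POINTER option checks, a minimal
sketch of an __rcu-annotated pointer used correctly (the struct and all
names here are hypothetical):

    #include <linux/rcupdate.h>

    struct foo {
            int val;
    };
    static struct foo __rcu *global_foo;    /* sparse tracks this */

    static int read_val(void)
    {
            int v;

            rcu_read_lock();
            /* A plain global_foo->val would be flagged by sparse;
             * rcu_dereference() is the annotated access. */
            v = rcu_dereference(global_foo)->val;
            rcu_read_unlock();
            return v;
    }

Updates would go through rcu_assign_pointer(global_foo, newp); the same
annotation is applied to the radix-tree slots[] array further down.
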
index 7cdfad88128fa5d3d3076d552fbde276ef500d66..19552096d16b06bd2dac2a9b10212e482a4d0da3 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                       struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                        struct module *mod)
 {
        char *secstrings;
        unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
         * could potentially lead to deadlock and thus be counter-productive.
         */
        list_add(&mod->bug_list, &module_bug_list);
-
-       return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
index 02afc25337284329d23a998e636240d5a41d337d..7bd6df781ce50198689fae375eef696751c0a47a 100644 (file)
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
        struct list_head link;
        char *mod_name;
@@ -87,26 +79,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
        return buf;
 }
 
-/*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-       struct ddebug_table *dt;
-       char table_hash_value;
-
-       list_for_each_entry(dt, &ddebug_tables, link) {
-               if (first_table)
-                       table_hash_value = dt->ddebugs->primary_hash;
-               else
-                       table_hash_value = dt->ddebugs->secondary_hash;
-               if (dt->num_enabled && (hash == table_hash_value))
-                       return 0;
-       }
-       return 1;
-}
-
 /*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them.  Tells
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
                                dt->num_enabled++;
                        dp->flags = newflags;
                        if (newflags) {
-                               dynamic_debug_enabled |=
-                                               (1LL << dp->primary_hash);
-                               dynamic_debug_enabled2 |=
-                                               (1LL << dp->secondary_hash);
+                               jump_label_enable(&dp->enabled);
                        } else {
-                               if (disabled_hash(dp->primary_hash, true))
-                                       dynamic_debug_enabled &=
-                                               ~(1LL << dp->primary_hash);
-                               if (disabled_hash(dp->secondary_hash, false))
-                                       dynamic_debug_enabled2 &=
-                                               ~(1LL << dp->secondary_hash);
+                               jump_label_disable(&dp->enabled);
                        }
                        if (verbose)
                                printk(KERN_INFO
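
For context on the jump-label conversion above: every pr_debug() callsite
carries a struct _ddebug descriptor, and ddebug_change() now toggles that
callsite's branch directly instead of maintaining the two global hash
bitmasks. Conceptually (illustrative only; the real fast path is a
runtime-patched branch, not a load-and-test):

    static void example_callsite(struct _ddebug *dp)
    {
            /* jump_label_enable()/jump_label_disable() flip this
             * condition; shown here as a plain test for clarity. */
            if (unlikely(dp->enabled))
                    printk(KERN_DEBUG "example debug message\n");
    }
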
index 4b5cb794c38bb270b8b72b70a47de265ec05c210..a7616fa3162e844f5b3c7090c1911543c826147e 100644 (file)
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
                 * element comparison is needed, so the client's cmp()
                 * routine can invoke cond_resched() periodically.
                 */
-               (*cmp)(priv, tail, tail);
+               (*cmp)(priv, tail->next, tail->next);
 
                tail->next->prev = tail;
                tail = tail->next;
index efd16fa80b1cfd55f2e1f1295f1cd45765925abf..6f412ab4c24f812fc8b7290c4a1e06cc0250ea62 100644 (file)
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
        unsigned int    height;         /* Height from the bottom */
        unsigned int    count;
        struct rcu_head rcu_head;
-       void            *slots[RADIX_TREE_MAP_SIZE];
+       void __rcu      *slots[RADIX_TREE_MAP_SIZE];
        unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
index a5ec42868f99d8d6700f34ca81c061c1ef21d15b..4ceb05d772aed12d392d618358284ea71cb51dd2 100644 (file)
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                left -= sg_size;
 
                sg = alloc_fn(alloc_size, gfp_mask);
-               if (unlikely(!sg))
-                       return -ENOMEM;
+               if (unlikely(!sg)) {
+                       /*
+                        * Adjust entry count to reflect that the last
+                        * entry of the previous table won't be used for
+                        * linkage.  Without this, sg_kfree() may get
+                        * confused.
+                        */
+                       if (prv)
+                               table->nents = ++table->orig_nents;
+
+                       return -ENOMEM;
+               }
 
                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;
index 34e3082632d8311575e27c6d2a4b87c9985dd574..7c06ee51a29a9bfbf79692af3b7e211751005e3c 100644 (file)
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
-       io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+       io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
-       io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+       io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
        /*
         * Get the overflow emergency buffer
         */
-       io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+       io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");
        if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = alloc_bootmem_low_pages(bytes);
+       io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
        } else {
                free_bootmem_late(__pa(io_tlb_overflow_buffer),
-                                 io_tlb_overflow);
+                                 PAGE_ALIGN(io_tlb_overflow));
                free_bootmem_late(__pa(io_tlb_orig_addr),
-                                 io_tlb_nslabs * sizeof(phys_addr_t));
+                                 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_bootmem_late(__pa(io_tlb_list),
-                                 io_tlb_nslabs * sizeof(int));
+                                 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
                free_bootmem_late(__pa(io_tlb_start),
-                                 io_tlb_nslabs << IO_TLB_SHIFT);
+                                 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
 }
 
index f4e516e9c37cc4c62f93faf92131a632098d2ca5..f0fb9124e410c436c0f240d69f089f4935af2e92 100644 (file)
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
index eaa4a5bbe0634390fc802ebffdbad4291fa3b991..65d420499a615bf68a3be8313c3b0d8b1b330178 100644 (file)
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void)
        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
+       err = bdi_init(&noop_backing_dev_info);
 
        return err;
 }
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr)
                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
-                       task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
-                                          dev_name(bdi->dev));
+                       task = kthread_create(bdi_writeback_thread, &bdi->wb,
+                                             "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr)
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
+                                * And as soon as the bdi thread is visible, we
+                                * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
+                               wake_up_process(task);
                        }
                        break;
 
index 13b6dad1eed272bec61a388d17f116be62cb1bb5..1481de68184bce6d8fae978d3b6be8e3223319a5 100644 (file)
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
+               flush_dcache_page(tovec->bv_page);
        }
 }
 
index 94cce51b0b3535af75c20f29ecb86a11aba32a71..4d709ee5901370842534224a9f81e7d13943e196 100644 (file)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
-
-       unsigned long inactive, isolated;
+       unsigned long active, inactive, isolated;
 
        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
+       active = zone_page_state(zone, NR_ACTIVE_FILE) +
+                                       zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);
 
-       return isolated > inactive;
+       return isolated > (inactive + active) / 2;
 }
 
 /*
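
A worked example of the relaxed throttle above (numbers assumed): with
1000 inactive and 9000 active pages on a zone's LRU lists, the old check
throttled compaction once more than 1000 pages were isolated, while the
new check allows up to (1000 + 9000) / 2 = 5000, so a small inactive list
no longer causes spurious throttling.
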
index 46f5dacf90a2cd62427fdf89b67fa01ef8af68e0..ec520c7b28dffedb5027de6b27834831938671cd 100644 (file)
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 {
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
-       unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (start + size <= start)
                return err;
 
+       /* Does pgoff wrap? */
+       if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+               return err;
+
        /* Can we represent this offset inside this architecture's pte's? */
 #if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (!(vma->vm_flags & VM_CAN_NONLINEAR))
                goto out;
 
-       if (end <= start || start < vma->vm_start || end > vma->vm_end)
+       if (start < vma->vm_start || start + size > vma->vm_end)
                goto out;
 
        /* Must set VM_NONLINEAR before any pages are populated. */
index cc5be788a39fe132c72cbc1d2fb1c03f71708575..c03273807182dde1d9dd2e905c0db11a6dfe2441 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
         * and just make the page writable */
        avoidcopy = (page_mapcount(old_page) == 1);
        if (avoidcopy) {
-               if (!trylock_page(old_page)) {
-                       if (PageAnon(old_page))
-                               page_move_anon_rmap(old_page, vma, address);
-               } else
-                       unlock_page(old_page);
+               if (PageAnon(old_page))
+                       page_move_anon_rmap(old_page, vma, address);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page);
-               hugepage_add_anon_rmap(new_page, vma, address);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
                mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
-       if (!pagecache_page) {
-               page = pte_page(entry);
+       /*
+        * hugetlb_cow() requires page locks of pte_page(entry) and
+        * pagecache_page, so here we need take the former one
+        * when page != pagecache_page or !pagecache_page.
+        * Note that locking order is always pagecache_page -> page,
+        * so no worry about deadlock.
+        */
+       page = pte_page(entry);
+       if (page != pagecache_page)
                lock_page(page);
-       }
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
        if (pagecache_page) {
                unlock_page(pagecache_page);
                put_page(pagecache_page);
-       } else {
-               unlock_page(page);
        }
+       unlock_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
index e2ae00458320786a380a1ab370efe0dc6cfd6e1a..65ab5c7067d994ad934c4f4bd5fd5809235a0756 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        if (!ptep)
                goto out;
 
-       if (pte_write(*ptep)) {
+       if (pte_write(*ptep) || pte_dirty(*ptep)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                        set_pte_at(mm, addr, ptep, entry);
                        goto out_unlock;
                }
-               entry = pte_wrprotect(entry);
+               if (pte_dirty(entry))
+                       set_page_dirty(page);
+               entry = pte_mkclean(pte_wrprotect(entry));
                set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
@@ -1504,8 +1506,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
        struct page *new_page;
 
-       unlock_page(page);      /* any racers will COW it, not modify it */
-
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1521,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                        add_page_to_unevictable_list(new_page);
        }
 
-       page_cache_release(page);
        return new_page;
 }
 
index 3eed583895a6f31eb434a252697371c7daa6bbd4..9be3cf8a5da462d4b1b4103eef61f8d5a9a6e06c 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3587,9 +3587,13 @@ unlock:
 
 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
 {
-       __mem_cgroup_threshold(memcg, false);
-       if (do_swap_account)
-               __mem_cgroup_threshold(memcg, true);
+       while (memcg) {
+               __mem_cgroup_threshold(memcg, false);
+               if (do_swap_account)
+                       __mem_cgroup_threshold(memcg, true);
+
+               memcg = parent_mem_cgroup(memcg);
+       }
 }
 
 static int compare_thresholds(const void *a, const void *b)
index 9c26eeca13425886690cddaf6dd45954fd3f0097..757f6b0accfe84d959b7fe5899b5916ad0ed1f14 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
  * signal.
  */
 static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
-                       unsigned long pfn)
+                       unsigned long pfn, struct page *page)
 {
        struct siginfo si;
        int ret;
@@ -198,7 +198,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
 #ifdef __ARCH_SI_TRAPNO
        si.si_trapno = trapno;
 #endif
-       si.si_addr_lsb = PAGE_SHIFT;
+       si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
        /*
         * Don't use force here, it's convenient if the signal
         * can be temporarily blocked.
@@ -235,7 +235,7 @@ void shake_page(struct page *p, int access)
                int nr;
                do {
                        nr = shrink_slab(1000, GFP_KERNEL, 1000);
-                       if (page_count(p) == 0)
+                       if (page_count(p) == 1)
                                break;
                } while (nr > 10);
        }
@@ -327,7 +327,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * wrong earlier.
  */
 static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
-                         int fail, unsigned long pfn)
+                         int fail, struct page *page, unsigned long pfn)
 {
        struct to_kill *tk, *next;
 
@@ -352,7 +352,7 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
                         * process anyways.
                         */
                        else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
-                                             pfn) < 0)
+                                             pfn, page) < 0)
                                printk(KERN_ERR
                "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                        pfn, tk->tsk->comm, tk->tsk->pid);
@@ -928,7 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * any accesses to the poisoned memory.
         */
        kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
-                     ret != SWAP_SUCCESS, pfn);
+                     ret != SWAP_SUCCESS, p, pfn);
 
        return ret;
 }
index 6b2ab10518512052c895dd5db7ff0f20fd1df2f3..98b58fecedeffc236a9c7285689fe4720409bd30 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       page = ksm_might_need_to_copy(page, vma, address);
-       if (!page) {
-               ret = VM_FAULT_OOM;
-               goto out;
+       /*
+        * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
+        * release the swapcache from under us.  The page pin, and pte_same
+        * test below, are not enough to exclude that.  Even if it is still
+        * swapcache, we need to check that the page's swap has not changed.
+        */
+       if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
        }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock to avoid the swap entry being reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2783,10 @@ out_page:
        unlock_page(page);
 out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
 
@@ -3154,7 +3185,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                 * with threads.
                 */
                if (flags & FAULT_FLAG_WRITE)
-                       flush_tlb_page(vma, address);
+                       flush_tlb_fix_spurious_fault(vma, address);
        }
 unlock:
        pte_unmap_unlock(pte, ptl);
index a4cfcdc00455de4be15fcec98c76e45f8de5feab..dd186c1a5d53f9ebd27de4c1bedcaac471d6c93e 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
-       int pageblocks_stride;
-
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
-       /* Move forward by at least 1 * pageblock_nr_pages */
-       pageblocks_stride = 1;
-
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page))
-               pageblocks_stride += page_order(page) - pageblock_order;
+       if (pageblock_free(page)) {
+               int order;
+               /* Be careful: we hold no locks, so page_order() can change under us. */
+               order = page_order(page);
+               if ((order < MAX_ORDER) && (order >= pageblock_order))
+                       return page + (1 << order);
+       }
 
-       return page + (pageblocks_stride * pageblock_nr_pages);
+       return page + pageblock_nr_pages;
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
index cbae7c5b95680a1bfca1df7e11a215bfce15b57c..b70919ce4f72e6941f67b1a5462f5f270c231536 100644 (file)
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
        return (vma->vm_flags & VM_GROWSDOWN) &&
index 6128dc8e5ede709cada129438fbac101895aa09d..00161a48a45100c611ebaa053f0b1f1486d09f29 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                        removed_exe_file_vma(mm);
                fput(new->vm_file);
        }
+       unlink_anon_vmas(new);
  out_free_mpol:
        mpol_put(pol);
  out_free_vma:
index f5b7d1760213e53db3c46e84dde56daf219ea0cd..e35bfb82c8555b7377334dbea42bfcf588b0bab8 100644 (file)
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+       /*
+        * While kswapd is awake, the zone is considered to be under some
+        * memory pressure. Under pressure, there is a risk that per-cpu
+        * counter drift will allow the min watermark to be breached,
+        * potentially causing a live-lock. While kswapd is awake and
+        * free pages are low, get a better estimate of the free page count.
+        */
+       if (nr_free_pages < zone->percpu_drift_mark &&
+                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+       return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
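
A worked example of the drift this guards against (values assumed): with
16 online CPUs and a per-cpu stat_threshold of 32 pages, NR_FREE_PAGES can
lag reality by up to 16 * 32 = 512 pages. Zones whose low-to-min watermark
gap is smaller than that get a percpu_drift_mark (set in the mm/vmstat.c
hunk below), under which, while kswapd is awake, the exact
zone_page_state_snapshot() is used instead of the cheap counter read.
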
index fc81cb22869ef54e6871daf39f51b32e3377aa98..4029583a10241aaa84e3937ee216740e0a88a363 100644 (file)
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 }
 
 /* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
-                          const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+               const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        if (is_global_init(p))
                return true;
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
         */
        points += p->signal->oom_score_adj;
 
-       if (points < 0)
-               return 0;
+       /*
+        * Never return 0 for an eligible task that may be killed since it's
+        * possible that no single user task uses more than 0.1% of memory and
+        * no single admin task uses more than 3.0%.
+        */
+       if (points <= 0)
+               return 1;
        return (points < 1000) ? points : 1000;
 }
 
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 /**
  * dump_tasks - dump current memory state of all system tasks
  * @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks.  Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
  * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
  * value, oom_score_adj value, and name.
  *
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
  * Call with tasklist_lock read-locked.
  */
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        struct task_struct *p;
        struct task_struct *task;
 
        pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
        for_each_process(p) {
-               if (p->flags & PF_KTHREAD)
-                       continue;
-               if (mem && !task_in_mem_cgroup(p, mem))
+               if (oom_unkillable_task(p, mem, nodemask))
                        continue;
 
                task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-                                                       struct mem_cgroup *mem)
+                       struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
        mem_cgroup_print_oom_info(mem, p);
        show_mem();
        if (sysctl_oom_dump_tasks)
-               dump_tasks(mem);
+               dump_tasks(mem, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        unsigned int victim_points = 0;
 
        if (printk_ratelimit())
-               dump_header(p, gfp_mask, order, mem);
+               dump_header(p, gfp_mask, order, mem, nodemask);
 
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
 static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-                               int order)
+                               int order, const nodemask_t *nodemask)
 {
        if (likely(!sysctl_panic_on_oom))
                return;
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                        return;
        }
        read_lock(&tasklist_lock);
-       dump_header(NULL, gfp_mask, order, NULL);
+       dump_header(NULL, gfp_mask, order, NULL, nodemask);
        read_unlock(&tasklist_lock);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
        unsigned int points = 0;
        struct task_struct *p;
 
-       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
        limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
        read_lock(&tasklist_lock);
 retry:
@@ -641,6 +644,7 @@ static void clear_system_oom(void)
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask)
 {
+       const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                                &totalpages);
-       check_panic_on_oom(constraint, gfp_mask, order);
+       mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+       check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
 
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        }
 
 retry:
-       p = select_bad_process(&points, totalpages, NULL,
-                       constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
-                                                                NULL);
+       p = select_bad_process(&points, totalpages, NULL, mpol_mask);
        if (PTR_ERR(p) == -1UL)
                goto out;
 
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
-               dump_header(NULL, gfp_mask, order, NULL);
+               dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                read_unlock(&tasklist_lock);
                panic("Out of memory and no killable processes...\n");
        }
index a9649f4b261e6b3c01632939c46a77f19f447de1..f12ad1836abe115b1b8e3bf4b9187c01249e8a30 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages may go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
+       bool drained = false;
 
        cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (order != 0)
-               drain_all_pages();
+       if (unlikely(!(*did_some_progress)))
+               return NULL;
 
-       if (likely(*did_some_progress))
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
+
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+        * pages are pinned on the per-cpu lists. Drain them and try again
+        */
+       if (!page && !drained) {
+               drain_all_pages();
+               drained = true;
+               goto retry;
+       }
+
        return page;
 }
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
@@ -5169,9 +5182,9 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);
 
-       printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
+       printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
               tablename,
-              (1U << log2qty),
+              (1UL << log2qty),
               ilog2(size) - PAGE_SHIFT,
               size);
 
index 58c572b18b07ffbca4e2120d4e1600705db5fc0e..c76ef3891e0da1c71ac3d1b2d4b1db0abdfe8b9f 100644 (file)
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1401,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
                        if (pcpu_first_unit_cpu == NR_CPUS)
                                pcpu_first_unit_cpu = cpu;
+                       pcpu_last_unit_cpu = cpu;
                }
        }
-       pcpu_last_unit_cpu = cpu;
        pcpu_nr_units = unit;
 
        for_each_possible_cpu(cpu)
index f6f0d2dda2eae8480860cf57f5a9cfce69820716..92e6757f196ed4e3b3598c1f8b7214616a4cbe39 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        if (PageAnon(page)) {
-               if (vma->anon_vma->root != page_anon_vma(page)->root)
+               struct anon_vma *page__anon_vma = page_anon_vma(page);
+               /*
+                * Note: swapoff's unuse_vma() is more efficient with this
+                * check, and needs it to match anon_vma when KSM is active.
+                */
+               if (!vma->anon_vma || !page__anon_vma ||
+                   vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
+
        BUG_ON(!anon_vma);
-       if (!exclusive) {
-               struct anon_vma_chain *avc;
-               avc = list_entry(vma->anon_vma_chain.prev,
-                                struct anon_vma_chain, same_vma);
-               anon_vma = avc->anon_vma;
-       }
+
+       if (PageAnon(page))
+               return;
+       if (!exclusive)
+               anon_vma = anon_vma->root;
+
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        page->index = linear_page_index(vma, address);
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page,
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        int first;
+
+       BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        first = atomic_inc_and_test(&page->_mapcount);
index 1f3f9c59a73ab5be4ff4bb37f428364df7544706..7c703ff2f36f0b760b79eb36149084f07621a0a1 100644 (file)
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        return err;
                cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
-                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-                                                       BLKDEV_IFL_BARRIER))
+                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
                                break;
                }
 
@@ -320,10 +315,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && usage == SWAP_HAS_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
        if (page_swapcount(page))
                return 0;
 
+       /*
+        * Once hibernation has begun to create its image of memory,
+        * there's a danger that one of the calls to try_to_free_swap()
+        * - most probably a call from __try_to_reclaim_swap() while
+        * hibernation is allocating its own swap pages for the image,
+        * but conceivably even a call from memory reclaim - will free
+        * the swap from a page which has already been recorded in the
+        * image as a clean swapcache page, and then reuse its swap for
+        * another page of the image.  On waking from hibernation, the
+        * original page might be freed under memory pressure, then
+        * later read back in from swap, now with the wrong data.
+        *
+        * Hibernation clears bits from gfp_allowed_mask to prevent
+        * memory reclaim from writing to disk, so check that here.
+        */
+       if (!(gfp_allowed_mask & __GFP_IO))
+               return 0;
+
        delete_from_swap_cache(page);
        SetPageDirty(page);
        return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0)
+               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
                        p->flags |= SWP_DISCARDABLE;
        }
 
index 6b8889da69a60612301c2bd26244ae0f3e1e1966..d8087f0db5073fc9ed3f879c7bc582c4fc462d0f 100644 (file)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -516,6 +516,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
+/*
+ * Called before iounmap() if the caller wants the vm_area_structs
+ * freed immediately.
+ */
+void set_iounmap_nonlazy(void)
+{
+       atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
 /*
  * Purges all lazily-freed vmap areas.
  *
index c391c320dbafcda04260923f36336891283b614f..c5dfabf25f115a34df8f9111843af28a8d58d906 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                }
 
                shrink_zone(priority, zone, sc);
-               all_unreclaimable = false;
        }
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+       return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/*
+ * While hibernation is in progress, kswapd is frozen so that it can't
+ * mark the zone all_unreclaimable, and it can't handle OOM during
+ * hibernation. So check the zone's reclaimability in direct reclaim
+ * as well as in kswapd.
+ */
+static bool all_unreclaimable(struct zonelist *zonelist,
+               struct scan_control *sc)
+{
+       struct zoneref *z;
+       struct zone *zone;
+       bool all_unreclaimable = true;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                       gfp_zone(sc->gfp_mask), sc->nodemask) {
+               if (!populated_zone(zone))
+                       continue;
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                       continue;
+               if (zone_reclaimable(zone)) {
+                       all_unreclaimable = false;
+                       break;
+               }
+       }
+
        return all_unreclaimable;
 }
 
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        int priority;
-       bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               all_unreclaimable = shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -1931,7 +1959,7 @@ out:
                return sc->nr_reclaimed;
 
        /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (scanning_global_lru(sc) && !all_unreclaimable)
+       if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
 
        return 0;
@@ -2197,8 +2225,7 @@ loop_again:
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
-                       if (nr_slab == 0 &&
-                           zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+                       if (nr_slab == 0 && !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
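
    The refactor above centralizes the "give up on this zone" heuristic in
    zone_reclaimable(): a zone is still worth scanning until pages_scanned
    reaches six times its reclaimable page count. A standalone restatement of
    that 6x rule with made-up numbers:

        #include <stdbool.h>
        #include <stdio.h>

        static bool zone_reclaimable(unsigned long scanned,
                                     unsigned long reclaimable)
        {
                return scanned < reclaimable * 6;
        }

        int main(void)
        {
                /* 1000 reclaimable pages: give up after 6000 fruitless scans */
                printf("%d\n", zone_reclaimable(5999, 1000)); /* 1: keep trying */
                printf("%d\n", zone_reclaimable(6000, 1000)); /* 0: give up     */
                return 0;
        }
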
index f389168f9a837b9c6be4e1f9bb3d0892396315de..355a9e669aaa800d62fa31d2b83110bf76cce9d7 100644 (file)
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
+               unsigned long max_drift, tolerate_drift;
+
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
+
+               /*
+                * Only set percpu_drift_mark if there is a danger that
+                * NR_FREE_PAGES reports that the low watermark is met when in
+                * fact the min watermark could be breached by an allocation.
+                */
+               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+               max_drift = num_online_cpus() * threshold;
+               if (max_drift > tolerate_drift)
+                       zone->percpu_drift_mark = high_wmark_pages(zone) +
+                                       max_drift;
        }
 }
 
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_page_state(zone, NR_FREE_PAGES),
+                  zone_nr_free_pages(zone),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
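
    Each per-CPU counter may lag by up to its stat_threshold, so a reader of
    NR_FREE_PAGES can be off by num_online_cpus() * threshold pages; the hunk
    sets percpu_drift_mark only when that drift exceeds the low-to-min
    watermark gap. A worked sketch with made-up numbers:

        /* 16 CPUs and a per-cpu threshold of 32 means the cached free-page
         * count can drift by up to 512 pages from the true value */
        static unsigned long drift_mark(unsigned long cpus, unsigned long threshold,
                                        unsigned long min, unsigned long low,
                                        unsigned long high)
        {
                unsigned long tolerate_drift = low - min;   /* 1024-896 = 128 */
                unsigned long max_drift = cpus * threshold; /* 16*32    = 512 */

                /* 512 > 128: an allocation could breach min while the cached
                 * counter still shows the low watermark as safe */
                if (max_drift > tolerate_drift)
                        return high + max_drift;            /* 1152+512 = 1664 */
                return 0;                                   /* no mark needed */
        }
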
index 01ddb0472f86c511f49ff8a602a726dd60550ff0..0eb96f7e44befb0155e364749f38eb37af0c2354 100644 (file)
@@ -24,8 +24,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
 
        return (polling ? netif_receive_skb(skb) : netif_rx(skb));
 
@@ -102,8 +105,11 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
 
        for (p = napi->gro_list; p; p = p->next) {
                NAPI_GRO_CB(p)->same_flow =
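
    Both vlan hunks change the same decision: a hardware-tagged frame for a
    VLAN we are not a member of used to be dropped outright, but in promiscuous
    mode it is now kept and marked PACKET_OTHERHOST, so taps such as tcpdump
    see it while the local stack still ignores it. A sketch of that decision
    (not the exact kernel code):

        /* returns true when the frame should be kept */
        static bool keep_vlan_frame(struct sk_buff *skb,
                                    struct net_device *vlan_dev, u16 vlan_id)
        {
                if (vlan_dev) {
                        skb->dev = vlan_dev;              /* a VLAN we joined  */
                } else if (vlan_id) {
                        if (!(skb->dev->flags & IFF_PROMISC))
                                return false;             /* unknown VLAN: drop */
                        skb->pkt_type = PACKET_OTHERHOST; /* taps only; the
                                                           * stack skips it    */
                }
                return true;
        }
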
index dc6f2f26d0230b1462ea1ea583c6fda5dc5b49c5..9eb72505308fc697d2857737f8dd3f4ec04d5bd3 100644 (file)
@@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c)
                }
        }
 
-       if (c->tagpool)
+       if (c->tagpool) {
+               p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
                p9_idpool_destroy(c->tagpool);
+       }
 
        /* free requests associated with tags */
        for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
        int16_t nwqids, count;
 
        err = 0;
+       wqids = NULL;
        clnt = oldfid->clnt;
        if (clone) {
                fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
        else
                fid->qid = oldfid->qid;
 
+       kfree(wqids);
        return fid;
 
 clunk_fid:
+       kfree(wqids);
        p9_client_clunk(fid);
        fid = NULL;
 
index 0ea20c30466c7b5758e419fd51c3cba70c9797d7..17c5ba7551a55e79c2c38e22a8fe2fdaa8e00979 100644 (file)
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Allocate an fcall for the reply */
        rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
-       if (!rpl_context)
+       if (!rpl_context) {
+               err = -ENOMEM;
                goto err_close;
+       }
 
        /*
         * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        }
        rpl_context->rc = req->rc;
        if (!rpl_context->rc) {
-               kfree(rpl_context);
-               goto err_close;
+               err = -ENOMEM;
+               goto err_free2;
        }
 
        /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
         */
        if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
                err = post_recv(client, rpl_context);
-               if (err) {
-                       kfree(rpl_context->rc);
-                       kfree(rpl_context);
-                       goto err_close;
-               }
+               if (err)
+                       goto err_free1;
        } else
                atomic_dec(&rdma->rq_count);
 
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Post the request */
        c = kmalloc(sizeof *c, GFP_KERNEL);
-       if (!c)
-               goto err_close;
+       if (!c) {
+               err = -ENOMEM;
+               goto err_free1;
+       }
        c->req = req;
 
        c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        return ib_post_send(rdma->qp, &wr, &bad_wr);
 
  error:
+       kfree(c);
+       kfree(rpl_context->rc);
+       kfree(rpl_context);
        P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
-
+ err_free1:
+       kfree(rpl_context->rc);
+ err_free2:
+       kfree(rpl_context);
  err_close:
        spin_lock_irqsave(&rdma->req_lock, flags);
        if (rdma->state < P9_RDMA_CLOSING) {
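
    The rdma_request() rework replaces scattered per-site kfree() calls with
    layered unwind labels, so each allocation is freed exactly once no matter
    where the failure happens, and err is set before every goto. A generic
    sketch of the idiom (struct foo is hypothetical):

        static int setup(void)
        {
                struct foo *a, *b;
                int err = -ENOMEM;

                a = kmalloc(sizeof(*a), GFP_KERNEL);
                if (!a)
                        goto err_out;
                b = kmalloc(sizeof(*b), GFP_KERNEL);
                if (!b)
                        goto err_free_a;

                return 0;       /* caller now owns a and b */

        err_free_a:
                kfree(a);       /* frees exactly what was live */
        err_out:
                return err;
        }
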
index dcfbe99ff81c8c0ac56031e1cbbd56bb7ed2fcd4..b88515936e4b3310741e6beefac62376bfe9d832 100644 (file)
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 
        mutex_lock(&virtio_9p_lock);
        list_for_each_entry(chan, &virtio_chan_list, chan_list) {
-               if (!strncmp(devname, chan->tag, chan->tag_len)) {
+               if (!strncmp(devname, chan->tag, chan->tag_len) &&
+                   strlen(devname) == chan->tag_len) {
                        if (!chan->inuse) {
                                chan->inuse = true;
                                found = 1;
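
    The trans_virtio hunk fixes a prefix-match bug: strncmp(devname, tag,
    tag_len) returns 0 for any devname that merely starts with the channel
    tag, so the extra strlen() check makes the comparison exact. A small
    runnable demonstration with made-up names:

        #include <stdio.h>
        #include <string.h>

        static int tag_matches(const char *devname, const char *tag,
                               size_t tag_len)
        {
                return strncmp(devname, tag, tag_len) == 0 &&
                       strlen(devname) == tag_len;     /* the added check */
        }

        int main(void)
        {
                printf("%d\n", !strncmp("mydevice", "my", 2));    /* 1: false hit */
                printf("%d\n", tag_matches("mydevice", "my", 2)); /* 0: rejected  */
                printf("%d\n", tag_matches("my", "my", 2));       /* 1: exact     */
                return 0;
        }
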
index e330594d3709e6d382b2e5168cea989127793aa1..55fd82e9ffd91e9fd48878147f3068923373ce16 100644 (file)
@@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig"
 
 config RPS
        boolean
-       depends on SMP && SYSFS
+       depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
 menu "Network testing"
@@ -293,6 +293,7 @@ source "net/wimax/Kconfig"
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
 source "net/caif/Kconfig"
+source "net/ceph/Kconfig"
 
 
 endif   # if NET
index ea60fbce9b1ba3e623ee9f1ec9ce622169a3596d..6b7bfd7f1416d9950e90cb3ddd065d998c0e78ee 100644 (file)
@@ -68,3 +68,4 @@ obj-$(CONFIG_SYSCTL)          += sysctl_net.o
 endif
 obj-$(CONFIG_WIMAX)            += wimax/
 obj-$(CONFIG_DNS_RESOLVER)     += dns_resolver/
+obj-$(CONFIG_CEPH_LIB)         += ceph/
index 651babdfab3845ebd11eb6cc89fe85eae1d3f8ca..ad2b232a2055fbc241828832078087c62e4c4315 100644 (file)
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
                        unregister_netdev(net_dev);
                        free_netdev(net_dev);
                }
-               read_lock_irq(&devs_lock);
-               if (list_empty(&br2684_devs)) {
-                       /* last br2684 device */
-                       unregister_atmdevice_notifier(&atm_dev_notifier);
-               }
-               read_unlock_irq(&devs_lock);
                return;
        }
 
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
 
        if (list_empty(&br2684_devs)) {
                /* 1st br2684 device */
-               register_atmdevice_notifier(&atm_dev_notifier);
                brdev->number = 1;
        } else
                brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@ static int __init br2684_init(void)
                return -ENOMEM;
 #endif
        register_atm_ioctl(&br2684_ioctl_ops);
+       register_atmdevice_notifier(&atm_dev_notifier);
        return 0;
 }
 
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
 #endif
 
 
-       /* if not already empty */
-       if (!list_empty(&br2684_devs))
-               unregister_atmdevice_notifier(&atm_dev_notifier);
+       unregister_atmdevice_notifier(&atm_dev_notifier);
 
        while (!list_empty(&br2684_devs)) {
                net_dev = list_entry_brdev(br2684_devs.next);
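
    The br2684 hunks move notifier registration out of the first-device /
    last-device paths, which raced with concurrent create and remove, and tie
    it to module lifetime instead. A sketch of the chosen lifecycle (mydrv_*
    names are hypothetical):

        static int __init mydrv_init(void)
        {
                /* register once, unconditionally, at load time */
                register_atmdevice_notifier(&atm_dev_notifier);
                return 0;
        }

        static void __exit mydrv_exit(void)
        {
                /* mirror of init: no list_empty() bookkeeping needed */
                unregister_atmdevice_notifier(&atm_dev_notifier);
        }
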
index 622b471e14e03dbc3752697851022a59aebffbe0..74bcc662c3dd8c5e7ec33a393560cc9bbf313b57 100644 (file)
@@ -778,7 +778,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
        eg->packets_rcvd++;
        mpc->eg_ops->put(eg);
 
-       memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
+       memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
        netif_rx(new_skb);
 }
 
index fadf26b4ed7c432eba09800b4532683fc44cc02d..0b54b7dd84010a52147a54155c4f8db2b61752a7 100644 (file)
@@ -1441,33 +1441,23 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
 
 static void l2cap_streaming_send(struct sock *sk)
 {
-       struct sk_buff *skb, *tx_skb;
+       struct sk_buff *skb;
        struct l2cap_pinfo *pi = l2cap_pi(sk);
        u16 control, fcs;
 
-       while ((skb = sk->sk_send_head)) {
-               tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+       while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
                control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
 
                if (pi->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-                       put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
                }
 
-               l2cap_do_send(sk, tx_skb);
+               l2cap_do_send(sk, skb);
 
                pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
-               if (skb_queue_is_last(TX_QUEUE(sk), skb))
-                       sk->sk_send_head = NULL;
-               else
-                       sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
-               skb = skb_dequeue(TX_QUEUE(sk));
-               kfree_skb(skb);
        }
 }
 
@@ -1960,6 +1950,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 
        switch (optname) {
        case L2CAP_OPTIONS:
+               if (sk->sk_state == BT_CONNECTED) {
+                       err = -EINVAL;
+                       break;
+               }
+
                opts.imtu     = l2cap_pi(sk)->imtu;
                opts.omtu     = l2cap_pi(sk)->omtu;
                opts.flush_to = l2cap_pi(sk)->flush_to;
@@ -2771,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
                case L2CAP_CONF_MTU:
                        if (val < L2CAP_DEFAULT_MIN_MTU) {
                                *result = L2CAP_CONF_UNACCEPT;
-                               pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+                               pi->imtu = L2CAP_DEFAULT_MIN_MTU;
                        } else
-                               pi->omtu = val;
-                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+                               pi->imtu = val;
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
                        break;
 
                case L2CAP_CONF_FLUSH_TO:
@@ -3071,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
        return 0;
 }
 
+static inline void set_default_fcs(struct l2cap_pinfo *pi)
+{
+       /* FCS is enabled only in ERTM or streaming mode, if one or both
+        * sides request it.
+        */
+       if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+               pi->fcs = L2CAP_FCS_NONE;
+       else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+               pi->fcs = L2CAP_FCS_CRC16;
+}
+
 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
 {
        struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -3088,14 +3094,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
        if (!sk)
                return -ENOENT;
 
-       if (sk->sk_state != BT_CONFIG) {
-               struct l2cap_cmd_rej rej;
-
-               rej.reason = cpu_to_le16(0x0002);
-               l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-                               sizeof(rej), &rej);
+       if (sk->sk_state == BT_DISCONN)
                goto unlock;
-       }
 
        /* Reject if config buffer is too small. */
        len = cmd_len - sizeof(*req);
@@ -3135,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
                goto unlock;
 
        if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-               if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-                   l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-                       l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+               set_default_fcs(l2cap_pi(sk));
 
                sk->sk_state = BT_CONNECTED;
 
@@ -3225,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
        l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
 
        if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-               if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-                   l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-                       l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+               set_default_fcs(l2cap_pi(sk));
 
                sk->sk_state = BT_CONNECTED;
                l2cap_pi(sk)->next_tx_seq = 0;
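
    In streaming mode L2CAP never retransmits, so the rewritten
    l2cap_streaming_send() no longer clones each skb and separately unlinks
    and frees the original; it dequeues and hands ownership straight to the
    send path. A minimal sketch of that consume-on-send shape, assuming no
    copy must be kept:

        static void streaming_send_all(struct sock *sk)
        {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(TX_QUEUE(sk))) != NULL) {
                        /* stamp control field / FCS in place, then hand the
                         * skb off; the send path now owns and frees it */
                        l2cap_do_send(sk, skb);
                }
        }
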
index 44a623275951e4b481abf1942fb2587867891dca..194b3a04cfd38a3b4a13817d5aecace4f355ea49 100644 (file)
@@ -82,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
 static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 {
        struct sock *sk = d->owner, *parent;
+       unsigned long flags;
+
        if (!sk)
                return;
 
        BT_DBG("dlc %p state %ld err %d", d, d->state, err);
 
+       local_irq_save(flags);
        bh_lock_sock(sk);
 
        if (err)
@@ -108,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
        }
 
        bh_unlock_sock(sk);
+       local_irq_restore(flags);
 
        if (parent && sock_flag(sk, SOCK_ZAPPED)) {
                /* We have to drop DLC lock here, otherwise
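
    The rfcomm change wraps bh_lock_sock() in local_irq_save(), on the
    assumption that rfcomm_sk_state_change() can run with hard interrupts
    enabled: bh_lock_sock() only excludes bottom halves, so hard irqs must be
    masked separately. A sketch of the combined locking:

        unsigned long flags;

        local_irq_save(flags);  /* bh_lock_sock() alone won't mask hard irqs */
        bh_lock_sock(sk);
        /* ... update socket state ... */
        bh_unlock_sock(sk);
        local_irq_restore(flags);
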
index 8ce9047861166740a17a2448cb1d4f668fba1d21..4bf28f25f368b399a6ef220e06c08c0f5d2621f5 100644 (file)
@@ -827,6 +827,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
        long timeo;
        int err;
        int ifindex, headroom, tailroom;
+       unsigned int mtu;
        struct net_device *dev;
 
        lock_sock(sk);
@@ -896,15 +897,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                goto out;
        }
-       dev = dev_get_by_index(sock_net(sk), ifindex);
+
+       err = -ENODEV;
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
+       if (!dev) {
+               rcu_read_unlock();
+               goto out;
+       }
        cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+       mtu = dev->mtu;
+       rcu_read_unlock();
+
        cf_sk->tailroom = tailroom;
-       cf_sk->maxframe = dev->mtu - (headroom + tailroom);
-       dev_put(dev);
+       cf_sk->maxframe = mtu - (headroom + tailroom);
        if (cf_sk->maxframe < 1) {
-               pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
-                       __func__, dev->mtu);
-               err = -ENODEV;
+               pr_warning("CAIF: %s(): CAIF Interface MTU too small (%u)\n",
+                          __func__, mtu);
                goto out;
        }
 
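
    The caif_connect() fix addresses two problems at once: dev->mtu was read
    after dev_put() had dropped the reference, and the refcounted lookup was
    heavier than needed. The RCU variant takes no reference and copies the
    fields it needs before unlocking. A sketch of the pattern:

        static int lookup_mtu(struct net *net, int ifindex, unsigned int *mtu)
        {
                struct net_device *dev;

                rcu_read_lock();
                dev = dev_get_by_index_rcu(net, ifindex); /* no dev_put() needed */
                if (dev)
                        *mtu = dev->mtu;  /* copy while still protected */
                rcu_read_unlock();        /* dev must not be dereferenced now */

                return dev ? 0 : -ENODEV;
        }
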
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
new file mode 100644 (file)
index 0000000..ad42404
--- /dev/null
@@ -0,0 +1,28 @@
+config CEPH_LIB
+        tristate "Ceph core library (EXPERIMENTAL)"
+       depends on INET && EXPERIMENTAL
+       select LIBCRC32C
+       select CRYPTO_AES
+       select CRYPTO
+       default n
+       help
+         Choose Y or M here to include libceph, which provides the
+         functionality common to both the Ceph filesystem and the
+         rados block device (rbd).
+
+         More information at http://ceph.newdream.net/.
+
+         If unsure, say N.
+
+config CEPH_LIB_PRETTYDEBUG
+       bool "Include file:line in ceph debug output"
+       depends on CEPH_LIB
+       default n
+       help
+         If you say Y here, debug output will include a filename and
+         line to aid debugging.  This increases kernel size and slows
+         execution slightly when debug call sites are enabled (e.g.,
+         via CONFIG_DYNAMIC_DEBUG).
+
+         If unsure, say N.
+
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
new file mode 100644 (file)
index 0000000..aab1cab
--- /dev/null
@@ -0,0 +1,37 @@
+#
+# Makefile for the Ceph core library.
+#
+
+ifneq ($(KERNELRELEASE),)
+
+obj-$(CONFIG_CEPH_LIB) += libceph.o
+
+libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
+       mon_client.o \
+       osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
+       debugfs.o \
+       auth.o auth_none.o \
+       crypto.o armor.o \
+       auth_x.o \
+       ceph_fs.o ceph_strings.o ceph_hash.o \
+       pagevec.o
+
+else
+# Otherwise we were called directly from the command
+# line; invoke the kernel build system.
+
+KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default: all
+
+all:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
+
+modules_install:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
+
+clean:
+       $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
+
+endif
similarity index 100%
rename from fs/ceph/armor.c
rename to net/ceph/armor.c
similarity index 97%
rename from fs/ceph/auth.c
rename to net/ceph/auth.c
index 6d2e30600627e71d25c2f93d5862952a078472b8..549c1f43e1d53b134c045954e103260fee9033b7 100644 (file)
@@ -1,16 +1,16 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include "types.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/messenger.h>
 #include "auth_none.h"
 #include "auth_x.h"
-#include "decode.h"
-#include "super.h"
 
-#include "messenger.h"
 
 /*
  * get protocol handler
similarity index 96%
rename from fs/ceph/auth_none.c
rename to net/ceph/auth_none.c
index ad1dc21286c7f3ca9c84d75d7a2630bc0069dae4..214c2bb43d6252056a7a77dcd2d8eb5877c719d1 100644 (file)
@@ -1,14 +1,15 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+
 #include "auth_none.h"
-#include "auth.h"
-#include "decode.h"
 
 static void reset(struct ceph_auth_client *ac)
 {
similarity index 94%
rename from fs/ceph/auth_none.h
rename to net/ceph/auth_none.h
index 8164df1a08be5060aa065106bdbeb539f9caabec..ed7d088b1bc92e14372e2f24c8f5016750e07014 100644 (file)
@@ -2,8 +2,7 @@
 #define _FS_CEPH_AUTH_NONE_H
 
 #include <linux/slab.h>
-
-#include "auth.h"
+#include <linux/ceph/auth.h>
 
 /*
  * null security mode.
similarity index 99%
rename from fs/ceph/auth_x.c
rename to net/ceph/auth_x.c
index a2d002cbdec23d15d30624a345ae6090ea452a53..7fd5dfcf6e188551f9a25638129239d79df9d874 100644 (file)
@@ -1,16 +1,17 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+
+#include "crypto.h"
 #include "auth_x.h"
 #include "auth_x_protocol.h"
-#include "crypto.h"
-#include "auth.h"
-#include "decode.h"
 
 #define TEMP_TICKET_BUF_LEN    256
 
similarity index 96%
rename from fs/ceph/auth_x.h
rename to net/ceph/auth_x.h
index ff6f8180e6816a352ca5184d8db1b47e110b64f4..e02da7a5c5a1052b881f383e7fca318da0675421 100644 (file)
@@ -3,8 +3,9 @@
 
 #include <linux/rbtree.h>
 
+#include <linux/ceph/auth.h>
+
 #include "crypto.h"
-#include "auth.h"
 #include "auth_x_protocol.h"
 
 /*
similarity index 86%
rename from fs/ceph/buffer.c
rename to net/ceph/buffer.c
index cd39f17021de9ab6cde35ce444a727970f4f8fab..53d8abfa25d5ede4b487c88a5341b376c4921c26 100644 (file)
@@ -1,10 +1,11 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/slab.h>
 
-#include "buffer.h"
-#include "decode.h"
+#include <linux/ceph/buffer.h>
+#include <linux/ceph/decode.h>
 
 struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
 {
@@ -32,6 +33,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
        dout("buffer_new %p\n", b);
        return b;
 }
+EXPORT_SYMBOL(ceph_buffer_new);
 
 void ceph_buffer_release(struct kref *kref)
 {
@@ -46,6 +48,7 @@ void ceph_buffer_release(struct kref *kref)
        }
        kfree(b);
 }
+EXPORT_SYMBOL(ceph_buffer_release);
 
 int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
 {
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
new file mode 100644 (file)
index 0000000..f3e4a13
--- /dev/null
@@ -0,0 +1,529 @@
+
+#include <linux/ceph/ceph_debug.h>
+#include <linux/backing-dev.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/inet.h>
+#include <linux/in6.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/statfs.h>
+#include <linux/string.h>
+
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+
+
+
+/*
+ * find filename portion of a path (/foo/bar/baz -> baz)
+ */
+const char *ceph_file_part(const char *s, int len)
+{
+       const char *e = s + len;
+
+       while (e != s && *(e-1) != '/')
+               e--;
+       return e;
+}
+EXPORT_SYMBOL(ceph_file_part);
+
+const char *ceph_msg_type_name(int type)
+{
+       switch (type) {
+       case CEPH_MSG_SHUTDOWN: return "shutdown";
+       case CEPH_MSG_PING: return "ping";
+       case CEPH_MSG_AUTH: return "auth";
+       case CEPH_MSG_AUTH_REPLY: return "auth_reply";
+       case CEPH_MSG_MON_MAP: return "mon_map";
+       case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
+       case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
+       case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
+       case CEPH_MSG_STATFS: return "statfs";
+       case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
+       case CEPH_MSG_MDS_MAP: return "mds_map";
+       case CEPH_MSG_CLIENT_SESSION: return "client_session";
+       case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
+       case CEPH_MSG_CLIENT_REQUEST: return "client_request";
+       case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
+       case CEPH_MSG_CLIENT_REPLY: return "client_reply";
+       case CEPH_MSG_CLIENT_CAPS: return "client_caps";
+       case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
+       case CEPH_MSG_CLIENT_SNAP: return "client_snap";
+       case CEPH_MSG_CLIENT_LEASE: return "client_lease";
+       case CEPH_MSG_OSD_MAP: return "osd_map";
+       case CEPH_MSG_OSD_OP: return "osd_op";
+       case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
+       default: return "unknown";
+       }
+}
+EXPORT_SYMBOL(ceph_msg_type_name);
+
+/*
+ * Initially learn our fsid, or verify an fsid matches.
+ */
+int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
+{
+       if (client->have_fsid) {
+               if (ceph_fsid_compare(&client->fsid, fsid)) {
+                       pr_err("bad fsid, had %pU got %pU",
+                              &client->fsid, fsid);
+                       return -1;
+               }
+       } else {
+               pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
+               memcpy(&client->fsid, fsid, sizeof(*fsid));
+               ceph_debugfs_client_init(client);
+               client->have_fsid = true;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ceph_check_fsid);
+
+static int strcmp_null(const char *s1, const char *s2)
+{
+       if (!s1 && !s2)
+               return 0;
+       if (s1 && !s2)
+               return -1;
+       if (!s1 && s2)
+               return 1;
+       return strcmp(s1, s2);
+}
+
+int ceph_compare_options(struct ceph_options *new_opt,
+                        struct ceph_client *client)
+{
+       struct ceph_options *opt1 = new_opt;
+       struct ceph_options *opt2 = client->options;
+       int ofs = offsetof(struct ceph_options, mon_addr);
+       int i;
+       int ret;
+
+       ret = memcmp(opt1, opt2, ofs);
+       if (ret)
+               return ret;
+
+       ret = strcmp_null(opt1->name, opt2->name);
+       if (ret)
+               return ret;
+
+       ret = strcmp_null(opt1->secret, opt2->secret);
+       if (ret)
+               return ret;
+
+       /* any matching mon ip implies a match */
+       for (i = 0; i < opt1->num_mon; i++) {
+               if (ceph_monmap_contains(client->monc.monmap,
+                                &opt1->mon_addr[i]))
+                       return 0;
+       }
+       return -1;
+}
+EXPORT_SYMBOL(ceph_compare_options);
+
+
+static int parse_fsid(const char *str, struct ceph_fsid *fsid)
+{
+       int i = 0;
+       char tmp[3];
+       int err = -EINVAL;
+       int d;
+
+       dout("parse_fsid '%s'\n", str);
+       tmp[2] = 0;
+       while (*str && i < 16) {
+               if (ispunct(*str)) {
+                       str++;
+                       continue;
+               }
+               if (!isxdigit(str[0]) || !isxdigit(str[1]))
+                       break;
+               tmp[0] = str[0];
+               tmp[1] = str[1];
+               if (sscanf(tmp, "%x", &d) < 1)
+                       break;
+               fsid->fsid[i] = d & 0xff;
+               i++;
+               str += 2;
+       }
+
+       if (i == 16)
+               err = 0;
+       dout("parse_fsid ret %d got fsid %pU", err, fsid);
+       return err;
+}
+
+/*
+ * ceph options
+ */
+enum {
+       Opt_osdtimeout,
+       Opt_osdkeepalivetimeout,
+       Opt_mount_timeout,
+       Opt_osd_idle_ttl,
+       Opt_last_int,
+       /* int args above */
+       Opt_fsid,
+       Opt_name,
+       Opt_secret,
+       Opt_ip,
+       Opt_last_string,
+       /* string args above */
+       Opt_noshare,
+       Opt_nocrc,
+};
+
+static match_table_t opt_tokens = {
+       {Opt_osdtimeout, "osdtimeout=%d"},
+       {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
+       {Opt_mount_timeout, "mount_timeout=%d"},
+       {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
+       /* int args above */
+       {Opt_fsid, "fsid=%s"},
+       {Opt_name, "name=%s"},
+       {Opt_secret, "secret=%s"},
+       {Opt_ip, "ip=%s"},
+       /* string args above */
+       {Opt_noshare, "noshare"},
+       {Opt_nocrc, "nocrc"},
+       {-1, NULL}
+};
+
+void ceph_destroy_options(struct ceph_options *opt)
+{
+       dout("destroy_options %p\n", opt);
+       kfree(opt->name);
+       kfree(opt->secret);
+       kfree(opt);
+}
+EXPORT_SYMBOL(ceph_destroy_options);
+
+int ceph_parse_options(struct ceph_options **popt, char *options,
+                      const char *dev_name, const char *dev_name_end,
+                      int (*parse_extra_token)(char *c, void *private),
+                      void *private)
+{
+       struct ceph_options *opt;
+       const char *c;
+       int err = -ENOMEM;
+       substring_t argstr[MAX_OPT_ARGS];
+
+       opt = kzalloc(sizeof(*opt), GFP_KERNEL);
+       if (!opt)
+               return err;
+       opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr),
+                               GFP_KERNEL);
+       if (!opt->mon_addr)
+               goto out;
+
+       dout("parse_options %p options '%s' dev_name '%s'\n", opt, options,
+            dev_name);
+
+       /* start with defaults */
+       opt->flags = CEPH_OPT_DEFAULT;
+       opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
+       opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
+       opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
+       opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;   /* seconds */
+
+       /* get mon ip(s) */
+       /* ip1[:port1][,ip2[:port2]...] */
+       err = ceph_parse_ips(dev_name, dev_name_end, opt->mon_addr,
+                            CEPH_MAX_MON, &opt->num_mon);
+       if (err < 0)
+               goto out;
+
+       /* parse mount options */
+       while ((c = strsep(&options, ",")) != NULL) {
+               int token, intval, ret;
+               if (!*c)
+                       continue;
+               err = -EINVAL;
+               token = match_token((char *)c, opt_tokens, argstr);
+               if (token < 0 && parse_extra_token) {
+                       /* extra? */
+                       err = parse_extra_token((char *)c, private);
+                       if (err < 0) {
+                               pr_err("bad option at '%s'\n", c);
+                               goto out;
+                       }
+                       continue;
+               }
+               if (token < Opt_last_int) {
+                       ret = match_int(&argstr[0], &intval);
+                       if (ret < 0) {
+                               pr_err("bad mount option arg (not int) "
+                                      "at '%s'\n", c);
+                               continue;
+                       }
+                       dout("got int token %d val %d\n", token, intval);
+               } else if (token > Opt_last_int && token < Opt_last_string) {
+                       dout("got string token %d val %s\n", token,
+                            argstr[0].from);
+               } else {
+                       dout("got token %d\n", token);
+               }
+               switch (token) {
+               case Opt_ip:
+                       err = ceph_parse_ips(argstr[0].from,
+                                            argstr[0].to,
+                                            &opt->my_addr,
+                                            1, NULL);
+                       if (err < 0)
+                               goto out;
+                       opt->flags |= CEPH_OPT_MYIP;
+                       break;
+
+               case Opt_fsid:
+                       err = parse_fsid(argstr[0].from, &opt->fsid);
+                       if (err == 0)
+                               opt->flags |= CEPH_OPT_FSID;
+                       break;
+               case Opt_name:
+                       opt->name = kstrndup(argstr[0].from,
+                                             argstr[0].to-argstr[0].from,
+                                             GFP_KERNEL);
+                       break;
+               case Opt_secret:
+                       opt->secret = kstrndup(argstr[0].from,
+                                               argstr[0].to-argstr[0].from,
+                                               GFP_KERNEL);
+                       break;
+
+                       /* misc */
+               case Opt_osdtimeout:
+                       opt->osd_timeout = intval;
+                       break;
+               case Opt_osdkeepalivetimeout:
+                       opt->osd_keepalive_timeout = intval;
+                       break;
+               case Opt_osd_idle_ttl:
+                       opt->osd_idle_ttl = intval;
+                       break;
+               case Opt_mount_timeout:
+                       opt->mount_timeout = intval;
+                       break;
+
+               case Opt_noshare:
+                       opt->flags |= CEPH_OPT_NOSHARE;
+                       break;
+
+               case Opt_nocrc:
+                       opt->flags |= CEPH_OPT_NOCRC;
+                       break;
+
+               default:
+                       BUG_ON(token);
+               }
+       }
+
+       /* success */
+       *popt = opt;
+       return 0;
+
+out:
+       ceph_destroy_options(opt);
+       return err;
+}
+EXPORT_SYMBOL(ceph_parse_options);
+
+u64 ceph_client_id(struct ceph_client *client)
+{
+       return client->monc.auth->global_id;
+}
+EXPORT_SYMBOL(ceph_client_id);
+
+/*
+ * create a fresh client instance
+ */
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
+{
+       struct ceph_client *client;
+       int err = -ENOMEM;
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (client == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       client->private = private;
+       client->options = opt;
+
+       mutex_init(&client->mount_mutex);
+       init_waitqueue_head(&client->auth_wq);
+       client->auth_err = 0;
+
+       client->extra_mon_dispatch = NULL;
+       client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT;
+       client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT;
+
+       client->msgr = NULL;
+
+       /* subsystems */
+       err = ceph_monc_init(&client->monc, client);
+       if (err < 0)
+               goto fail;
+       err = ceph_osdc_init(&client->osdc, client);
+       if (err < 0)
+               goto fail_monc;
+
+       return client;
+
+fail_monc:
+       ceph_monc_stop(&client->monc);
+fail:
+       kfree(client);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ceph_create_client);
+
+void ceph_destroy_client(struct ceph_client *client)
+{
+       dout("destroy_client %p\n", client);
+
+       /* unmount */
+       ceph_osdc_stop(&client->osdc);
+
+       /*
+        * make sure mds and osd connections close out before destroying
+        * the auth module, which is needed to free those connections'
+        * ceph_authorizers.
+        */
+       ceph_msgr_flush();
+
+       ceph_monc_stop(&client->monc);
+
+       ceph_debugfs_client_cleanup(client);
+
+       if (client->msgr)
+               ceph_messenger_destroy(client->msgr);
+
+       ceph_destroy_options(client->options);
+
+       kfree(client);
+       dout("destroy_client %p done\n", client);
+}
+EXPORT_SYMBOL(ceph_destroy_client);
+
+/*
+ * true if we have the mon map (and have thus joined the cluster)
+ */
+static int have_mon_and_osd_map(struct ceph_client *client)
+{
+       return client->monc.monmap && client->monc.monmap->epoch &&
+              client->osdc.osdmap && client->osdc.osdmap->epoch;
+}
+
+/*
+ * mount: join the ceph cluster, and open root directory.
+ */
+int __ceph_open_session(struct ceph_client *client, unsigned long started)
+{
+       struct ceph_entity_addr *myaddr = NULL;
+       int err;
+       unsigned long timeout = client->options->mount_timeout * HZ;
+
+       /* initialize the messenger */
+       if (client->msgr == NULL) {
+               if (ceph_test_opt(client, MYIP))
+                       myaddr = &client->options->my_addr;
+               client->msgr = ceph_messenger_create(myaddr,
+                                       client->supported_features,
+                                       client->required_features);
+               if (IS_ERR(client->msgr)) {
+                       client->msgr = NULL;
+                       return PTR_ERR(client->msgr);
+               }
+               client->msgr->nocrc = ceph_test_opt(client, NOCRC);
+       }
+
+       /* open session, and wait for mon and osd maps */
+       err = ceph_monc_open_session(&client->monc);
+       if (err < 0)
+               return err;
+
+       while (!have_mon_and_osd_map(client)) {
+               err = -EIO;
+               if (timeout && time_after_eq(jiffies, started + timeout))
+                       return err;
+
+               /* wait */
+               dout("mount waiting for mon_map\n");
+               err = wait_event_interruptible_timeout(client->auth_wq,
+                       have_mon_and_osd_map(client) || (client->auth_err < 0),
+                       timeout);
+               if (err == -EINTR || err == -ERESTARTSYS)
+                       return err;
+               if (client->auth_err < 0)
+                       return client->auth_err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(__ceph_open_session);
+
+
+int ceph_open_session(struct ceph_client *client)
+{
+       int ret;
+       unsigned long started = jiffies;  /* note the start time */
+
+       dout("open_session start\n");
+       mutex_lock(&client->mount_mutex);
+
+       ret = __ceph_open_session(client, started);
+
+       mutex_unlock(&client->mount_mutex);
+       return ret;
+}
+EXPORT_SYMBOL(ceph_open_session);
+
+
+static int __init init_ceph_lib(void)
+{
+       int ret = 0;
+
+       ret = ceph_debugfs_init();
+       if (ret < 0)
+               goto out;
+
+       ret = ceph_msgr_init();
+       if (ret < 0)
+               goto out_debugfs;
+
+       pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
+               CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
+               CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
+               CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+
+       return 0;
+
+out_debugfs:
+       ceph_debugfs_cleanup();
+out:
+       return ret;
+}
+
+static void __exit exit_ceph_lib(void)
+{
+       dout("exit_ceph_lib\n");
+       ceph_msgr_exit();
+       ceph_debugfs_cleanup();
+}
+
+module_init(init_ceph_lib);
+module_exit(exit_ceph_lib);
+
+MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
+MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
+MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
+MODULE_DESCRIPTION("Ceph core library");
+MODULE_LICENSE("GPL");
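
    ceph_parse_options() above drives a match_table_t through match_token()
    and match_int() from <linux/parser.h>: int-valued tokens sit below
    Opt_last_int, string tokens between the two sentinels. A minimal sketch of
    that idiom; the Opt_* names and parse_one() are hypothetical:

        enum { Opt_timeout, Opt_name, Opt_err };

        static const match_table_t tokens = {
                {Opt_timeout, "timeout=%d"},
                {Opt_name,    "name=%s"},
                {Opt_err,     NULL}
        };

        static int parse_one(char *p, int *timeout)
        {
                substring_t args[MAX_OPT_ARGS];

                switch (match_token(p, tokens, args)) {
                case Opt_timeout:
                        return match_int(&args[0], timeout); /* 0 on success */
                case Opt_name:
                        /* args[0].from .. args[0].to bound the string value */
                        return 0;
                default:
                        return -EINVAL;
                }
        }
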
similarity index 92%
rename from fs/ceph/ceph_fs.c
rename to net/ceph/ceph_fs.c
index 3ac6cc7c1156dafd78262e7290edf914c3f01e31..a3a3a31d3c37b0006d0e9bfe26fd06a7ebe3b47d 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * Some non-inline ceph helpers
  */
-#include "types.h"
+#include <linux/module.h>
+#include <linux/ceph/types.h>
 
 /*
  * return true if @layout appears to be valid
@@ -52,6 +53,7 @@ int ceph_flags_to_mode(int flags)
 
        return mode;
 }
+EXPORT_SYMBOL(ceph_flags_to_mode);
 
 int ceph_caps_for_mode(int mode)
 {
@@ -70,3 +72,4 @@ int ceph_caps_for_mode(int mode)
 
        return caps;
 }
+EXPORT_SYMBOL(ceph_caps_for_mode);
similarity index 98%
rename from fs/ceph/ceph_hash.c
rename to net/ceph/ceph_hash.c
index bd570015d147ca2b1ce52061d359a56bb936a4df..815ef8826796a82b6b80030830a1663e9421b417 100644 (file)
@@ -1,5 +1,5 @@
 
-#include "types.h"
+#include <linux/ceph/types.h>
 
 /*
  * Robert Jenkin's hash function.
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
new file mode 100644 (file)
index 0000000..3fbda04
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Ceph string constants
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+const char *ceph_entity_type_name(int type)
+{
+       switch (type) {
+       case CEPH_ENTITY_TYPE_MDS: return "mds";
+       case CEPH_ENTITY_TYPE_OSD: return "osd";
+       case CEPH_ENTITY_TYPE_MON: return "mon";
+       case CEPH_ENTITY_TYPE_CLIENT: return "client";
+       case CEPH_ENTITY_TYPE_AUTH: return "auth";
+       default: return "unknown";
+       }
+}
+
+const char *ceph_osd_op_name(int op)
+{
+       switch (op) {
+       case CEPH_OSD_OP_READ: return "read";
+       case CEPH_OSD_OP_STAT: return "stat";
+
+       case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
+
+       case CEPH_OSD_OP_WRITE: return "write";
+       case CEPH_OSD_OP_DELETE: return "delete";
+       case CEPH_OSD_OP_TRUNCATE: return "truncate";
+       case CEPH_OSD_OP_ZERO: return "zero";
+       case CEPH_OSD_OP_WRITEFULL: return "writefull";
+       case CEPH_OSD_OP_ROLLBACK: return "rollback";
+
+       case CEPH_OSD_OP_APPEND: return "append";
+       case CEPH_OSD_OP_STARTSYNC: return "startsync";
+       case CEPH_OSD_OP_SETTRUNC: return "settrunc";
+       case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
+
+       case CEPH_OSD_OP_TMAPUP: return "tmapup";
+       case CEPH_OSD_OP_TMAPGET: return "tmapget";
+       case CEPH_OSD_OP_TMAPPUT: return "tmapput";
+
+       case CEPH_OSD_OP_GETXATTR: return "getxattr";
+       case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
+       case CEPH_OSD_OP_SETXATTR: return "setxattr";
+       case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
+       case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
+       case CEPH_OSD_OP_RMXATTR: return "rmxattr";
+       case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
+
+       case CEPH_OSD_OP_PULL: return "pull";
+       case CEPH_OSD_OP_PUSH: return "push";
+       case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
+       case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
+       case CEPH_OSD_OP_SCRUB: return "scrub";
+
+       case CEPH_OSD_OP_WRLOCK: return "wrlock";
+       case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
+       case CEPH_OSD_OP_RDLOCK: return "rdlock";
+       case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
+       case CEPH_OSD_OP_UPLOCK: return "uplock";
+       case CEPH_OSD_OP_DNLOCK: return "dnlock";
+
+       case CEPH_OSD_OP_CALL: return "call";
+
+       case CEPH_OSD_OP_PGLS: return "pgls";
+       }
+       return "???";
+}
+
+
+const char *ceph_pool_op_name(int op)
+{
+       switch (op) {
+       case POOL_OP_CREATE: return "create";
+       case POOL_OP_DELETE: return "delete";
+       case POOL_OP_AUID_CHANGE: return "auid change";
+       case POOL_OP_CREATE_SNAP: return "create snap";
+       case POOL_OP_DELETE_SNAP: return "delete snap";
+       case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
+       case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
+       }
+       return "???";
+}
similarity index 99%
rename from fs/ceph/crush/crush.c
rename to net/ceph/crush/crush.c
index fabd302e5779c0188b55916be81d7913dce84105..d6ebb13a18a4bc787eb249ad3e4b62ec1c174f97 100644 (file)
@@ -8,7 +8,7 @@
 # define BUG_ON(x) assert(!(x))
 #endif
 
-#include "crush.h"
+#include <linux/crush/crush.h>
 
 const char *crush_bucket_alg_name(int alg)
 {
similarity index 99%
rename from fs/ceph/crush/hash.c
rename to net/ceph/crush/hash.c
index 5873aed694bf5a06a092102a2209c5ad71a9aff8..5bb63e37a8a10f3419a399b32339cf5baf279762 100644 (file)
@@ -1,6 +1,6 @@
 
 #include <linux/types.h>
-#include "hash.h"
+#include <linux/crush/hash.h>
 
 /*
  * Robert Jenkins' function for mixing 32-bit values
similarity index 99%
rename from fs/ceph/crush/mapper.c
rename to net/ceph/crush/mapper.c
index a4eec133258e80a880a8cecca0282c54e11982ac..42599e31dcad8a6ceb1f3b29d9171f6a249e56a3 100644 (file)
@@ -18,8 +18,8 @@
 # define kfree(x) free(x)
 #endif
 
-#include "crush.h"
-#include "hash.h"
+#include <linux/crush/crush.h>
+#include <linux/crush/hash.h>
 
 /*
  * Implement the core CRUSH mapping algorithm.
similarity index 99%
rename from fs/ceph/crypto.c
rename to net/ceph/crypto.c
index a3e627f63293b2d329f620525328280a9ae15291..7b505b0c983f74eaffa51e306e38f2cd90aad3fb 100644 (file)
@@ -1,13 +1,13 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <crypto/hash.h>
 
+#include <linux/ceph/decode.h>
 #include "crypto.h"
-#include "decode.h"
 
 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
 {
similarity index 95%
rename from fs/ceph/crypto.h
rename to net/ceph/crypto.h
index bdf38607323c2da2eb87d2c3ebf7b3d158bd7d0d..f9eccace592b63b10a68c8f7d5d99abe2b2eba55 100644 (file)
@@ -1,8 +1,8 @@
 #ifndef _FS_CEPH_CRYPTO_H
 #define _FS_CEPH_CRYPTO_H
 
-#include "types.h"
-#include "buffer.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
 
 /*
  * cryptographic secret
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
new file mode 100644 (file)
index 0000000..27d4ea3
--- /dev/null
@@ -0,0 +1,267 @@
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Implement /sys/kernel/debug/ceph fun
+ *
+ * /sys/kernel/debug/ceph/client*  - an instance of the ceph client
+ *      .../osdmap      - current osdmap
+ *      .../monmap      - current monmap
+ *      .../osdc        - active osd requests
+ *      .../monc        - mon client state
+ *      .../dentry_lru  - dump contents of dentry lru
+ *      .../caps        - expose cap (reservation) stats
+ *      .../bdi         - symlink to ../../bdi/something
+ */
+
+static struct dentry *ceph_debugfs_dir;
+
+static int monmap_show(struct seq_file *s, void *p)
+{
+       int i;
+       struct ceph_client *client = s->private;
+
+       if (client->monc.monmap == NULL)
+               return 0;
+
+       seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
+       for (i = 0; i < client->monc.monmap->num_mon; i++) {
+               struct ceph_entity_inst *inst =
+                       &client->monc.monmap->mon_inst[i];
+
+               seq_printf(s, "\t%s%lld\t%s\n",
+                          ENTITY_NAME(inst->name),
+                          ceph_pr_addr(&inst->addr.in_addr));
+       }
+       return 0;
+}
+
+static int osdmap_show(struct seq_file *s, void *p)
+{
+       int i;
+       struct ceph_client *client = s->private;
+       struct rb_node *n;
+
+       if (client->osdc.osdmap == NULL)
+               return 0;
+       seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
+       seq_printf(s, "flags%s%s\n",
+                  (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
+                  " NEARFULL" : "",
+                  (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
+                  " FULL" : "");
+       for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
+               struct ceph_pg_pool_info *pool =
+                       rb_entry(n, struct ceph_pg_pool_info, node);
+               seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
+                          pool->id, pool->v.pg_num, pool->pg_num_mask,
+                          pool->v.lpg_num, pool->lpg_num_mask);
+       }
+       for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
+               struct ceph_entity_addr *addr =
+                       &client->osdc.osdmap->osd_addr[i];
+               int state = client->osdc.osdmap->osd_state[i];
+               char sb[64];
+
+               seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
+                          i, ceph_pr_addr(&addr->in_addr),
+                          ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
+                          ceph_osdmap_state_str(sb, sizeof(sb), state));
+       }
+       return 0;
+}
+
+static int monc_show(struct seq_file *s, void *p)
+{
+       struct ceph_client *client = s->private;
+       struct ceph_mon_generic_request *req;
+       struct ceph_mon_client *monc = &client->monc;
+       struct rb_node *rp;
+
+       mutex_lock(&monc->mutex);
+
+       if (monc->have_mdsmap)
+               seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+       if (monc->have_osdmap)
+               seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+       if (monc->want_next_osdmap)
+               seq_printf(s, "want next osdmap\n");
+
+       for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
+               __u16 op;
+               req = rb_entry(rp, struct ceph_mon_generic_request, node);
+               op = le16_to_cpu(req->request->hdr.type);
+               if (op == CEPH_MSG_STATFS)
+                       seq_printf(s, "%lld statfs\n", req->tid);
+               else
+                       seq_printf(s, "%lld unknown\n", req->tid);
+       }
+
+       mutex_unlock(&monc->mutex);
+       return 0;
+}
+
+static int osdc_show(struct seq_file *s, void *pp)
+{
+       struct ceph_client *client = s->private;
+       struct ceph_osd_client *osdc = &client->osdc;
+       struct rb_node *p;
+
+       mutex_lock(&osdc->request_mutex);
+       for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+               struct ceph_osd_request *req;
+               struct ceph_osd_request_head *head;
+               struct ceph_osd_op *op;
+               int num_ops;
+               int opcode, olen;
+               int i;
+
+               req = rb_entry(p, struct ceph_osd_request, r_node);
+
+               seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
+                          req->r_osd ? req->r_osd->o_osd : -1,
+                          le32_to_cpu(req->r_pgid.pool),
+                          le16_to_cpu(req->r_pgid.ps));
+
+               head = req->r_request->front.iov_base;
+               op = (void *)(head + 1);
+
+               num_ops = le16_to_cpu(head->num_ops);
+               olen = le32_to_cpu(head->object_len);
+               seq_printf(s, "%.*s", olen,
+                          (const char *)(head->ops + num_ops));
+
+               if (req->r_reassert_version.epoch)
+                       seq_printf(s, "\t%u'%llu",
+                          (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+                          le64_to_cpu(req->r_reassert_version.version));
+               else
+                       seq_printf(s, "\t");
+
+               for (i = 0; i < num_ops; i++) {
+                       opcode = le16_to_cpu(op->op);
+                       seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
+                       op++;
+               }
+
+               seq_printf(s, "\n");
+       }
+       mutex_unlock(&osdc->request_mutex);
+       return 0;
+}
+
+CEPH_DEFINE_SHOW_FUNC(monmap_show)
+CEPH_DEFINE_SHOW_FUNC(osdmap_show)
+CEPH_DEFINE_SHOW_FUNC(monc_show)
+CEPH_DEFINE_SHOW_FUNC(osdc_show)
+
+int ceph_debugfs_init(void)
+{
+       ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
+       if (!ceph_debugfs_dir)
+               return -ENOMEM;
+       return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+       debugfs_remove(ceph_debugfs_dir);
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+       int ret = -ENOMEM;
+       char name[80];
+
+       snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
+                client->monc.auth->global_id);
+
+       client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
+       if (!client->debugfs_dir)
+               goto out;
+
+       client->monc.debugfs_file = debugfs_create_file("monc",
+                                                     0600,
+                                                     client->debugfs_dir,
+                                                     client,
+                                                     &monc_show_fops);
+       if (!client->monc.debugfs_file)
+               goto out;
+
+       client->osdc.debugfs_file = debugfs_create_file("osdc",
+                                                     0600,
+                                                     client->debugfs_dir,
+                                                     client,
+                                                     &osdc_show_fops);
+       if (!client->osdc.debugfs_file)
+               goto out;
+
+       client->debugfs_monmap = debugfs_create_file("monmap",
+                                       0600,
+                                       client->debugfs_dir,
+                                       client,
+                                       &monmap_show_fops);
+       if (!client->debugfs_monmap)
+               goto out;
+
+       client->debugfs_osdmap = debugfs_create_file("osdmap",
+                                       0600,
+                                       client->debugfs_dir,
+                                       client,
+                                       &osdmap_show_fops);
+       if (!client->debugfs_osdmap)
+               goto out;
+
+       return 0;
+
+out:
+       ceph_debugfs_client_cleanup(client);
+       return ret;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+       debugfs_remove(client->debugfs_osdmap);
+       debugfs_remove(client->debugfs_monmap);
+       debugfs_remove(client->osdc.debugfs_file);
+       debugfs_remove(client->monc.debugfs_file);
+       debugfs_remove(client->debugfs_dir);
+}
+
+#else  /* CONFIG_DEBUG_FS */
+
+int ceph_debugfs_init(void)
+{
+       return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+       return 0;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+}
+
+#endif  /* CONFIG_DEBUG_FS */
+
+EXPORT_SYMBOL(ceph_debugfs_init);
+EXPORT_SYMBOL(ceph_debugfs_cleanup);
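
    ceph_debugfs_client_init() above creates a per-client directory plus one
    file per subsystem, and can bail to a single cleanup because (in this
    kernel era) debugfs_remove(NULL) is a no-op on never-created entries. A
    minimal sketch of the same pattern; "mydrv" and stats_fops are
    hypothetical:

        static const struct file_operations stats_fops; /* assumed defined */
        static struct dentry *dir, *file;

        static int mydrv_debugfs_init(void *priv)
        {
                dir = debugfs_create_dir("mydrv", NULL);
                if (!dir)
                        return -ENOMEM;
                file = debugfs_create_file("stats", 0600, dir, priv,
                                           &stats_fops);
                if (!file) {
                        debugfs_remove(dir);    /* undo the partial init */
                        return -ENOMEM;
                }
                return 0;
        }
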
similarity index 89%
rename from fs/ceph/messenger.c
rename to net/ceph/messenger.c
index 2502d76fcec175ddb8500bcd1863c52add1208e5..0e8157ee5d4382a32992f34c2b1cf0d78f2868b4 100644 (file)
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/crc32c.h>
 #include <linux/ctype.h>
@@ -9,12 +9,14 @@
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
 #include <net/tcp.h>
 
-#include "super.h"
-#include "messenger.h"
-#include "decode.h"
-#include "pagelist.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/pagelist.h>
 
 /*
  * Ceph uses the messenger to exchange ceph_msg messages with other
@@ -48,7 +50,7 @@ static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
 static DEFINE_SPINLOCK(addr_str_lock);
 static int last_addr_str;
 
-const char *pr_addr(const struct sockaddr_storage *ss)
+const char *ceph_pr_addr(const struct sockaddr_storage *ss)
 {
        int i;
        char *s;
@@ -79,6 +81,7 @@ const char *pr_addr(const struct sockaddr_storage *ss)
 
        return s;
 }
+EXPORT_SYMBOL(ceph_pr_addr);
 
 static void encode_my_addr(struct ceph_messenger *msgr)
 {
@@ -91,7 +94,7 @@ static void encode_my_addr(struct ceph_messenger *msgr)
  */
 struct workqueue_struct *ceph_msgr_wq;
 
-int __init ceph_msgr_init(void)
+int ceph_msgr_init(void)
 {
        ceph_msgr_wq = create_workqueue("ceph-msgr");
        if (IS_ERR(ceph_msgr_wq)) {
@@ -102,16 +105,19 @@ int __init ceph_msgr_init(void)
        }
        return 0;
 }
+EXPORT_SYMBOL(ceph_msgr_init);
 
 void ceph_msgr_exit(void)
 {
        destroy_workqueue(ceph_msgr_wq);
 }
+EXPORT_SYMBOL(ceph_msgr_exit);
 
 void ceph_msgr_flush(void)
 {
        flush_workqueue(ceph_msgr_wq);
 }
+EXPORT_SYMBOL(ceph_msgr_flush);
 
 
 /*
@@ -221,19 +227,19 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
 
        set_sock_callbacks(sock, con);
 
-       dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
+       dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 
        ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
                                 O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
-                    pr_addr(&con->peer_addr.in_addr),
+                    ceph_pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
                ret = 0;
        }
        if (ret < 0) {
                pr_err("connect %s error %d\n",
-                      pr_addr(&con->peer_addr.in_addr), ret);
+                      ceph_pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->sock = NULL;
                con->error_msg = "connect error";
@@ -334,7 +340,8 @@ static void reset_connection(struct ceph_connection *con)
  */
 void ceph_con_close(struct ceph_connection *con)
 {
-       dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
+       dout("con_close %p peer %s\n", con,
+            ceph_pr_addr(&con->peer_addr.in_addr));
        set_bit(CLOSED, &con->state);  /* in case there's queued work */
        clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
        clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
@@ -347,19 +354,21 @@ void ceph_con_close(struct ceph_connection *con)
        mutex_unlock(&con->mutex);
        queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_close);
 
 /*
  * Reopen a closed connection, with a new peer address.
  */
 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
 {
-       dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
+       dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        clear_bit(CLOSED, &con->state);
        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_open);
 
 /*
  * return true if this connection ever successfully opened
@@ -406,6 +415,7 @@ void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);
 }
+EXPORT_SYMBOL(ceph_con_init);
 
 
 /*
@@ -529,8 +539,11 @@ static void prepare_write_message(struct ceph_connection *con)
        if (le32_to_cpu(m->hdr.data_len) > 0) {
                /* initialize page iterator */
                con->out_msg_pos.page = 0;
-               con->out_msg_pos.page_pos =
-                       le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+               if (m->pages)
+                       con->out_msg_pos.page_pos =
+                               le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+               else
+                       con->out_msg_pos.page_pos = 0;
                con->out_msg_pos.data_pos = 0;
                con->out_msg_pos.did_page_crc = 0;
                con->out_more = 1;  /* data + footer will follow */
@@ -647,7 +660,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);
 
-       con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
+       con->out_connect.features = cpu_to_le64(msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -712,6 +725,31 @@ out:
        return ret;  /* done! */
 }
 
+#ifdef CONFIG_BLOCK
+static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+{
+       if (!bio) {
+               *iter = NULL;
+               *seg = 0;
+               return;
+       }
+       *iter = bio;
+       *seg = bio->bi_idx;
+}
+
+static void iter_bio_next(struct bio **bio_iter, int *seg)
+{
+       if (*bio_iter == NULL)
+               return;
+
+       BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+
+       (*seg)++;
+       if (*seg == (*bio_iter)->bi_vcnt)
+               init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
+}
+#endif
+
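init_bio_iter() and iter_bio_next() form a (bio, segment) cursor that transparently follows bi_next across a chained bio, which is what lets the write path below treat a block-layer request like any other page source. A sketch of walking a whole chain with the same two helpers, assuming each bio in the chain still has unprocessed segments (bi_idx < bi_vcnt); process_segment() is a stand-in, not part of this patch:

    static void process_segment(struct page *page, unsigned int off,
                                unsigned int len)
    {
            /* stand-in for per-segment work (kmap, crc, sendpage, ...) */
    }

    static void walk_bio_chain(struct bio *bio)
    {
            struct bio *iter;
            int seg;

            for (init_bio_iter(bio, &iter, &seg);
                 iter;
                 iter_bio_next(&iter, &seg)) {
                    struct bio_vec *bv = bio_iovec_idx(iter, seg);

                    process_segment(bv->bv_page, bv->bv_offset, bv->bv_len);
            }
    }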
 /*
  * Write as much message data payload as we can.  If we finish, queue
  * up the footer.
@@ -726,21 +764,46 @@ static int write_partial_msg_pages(struct ceph_connection *con)
        size_t len;
        int crc = con->msgr->nocrc;
        int ret;
+       int total_max_write;
+       int in_trail = 0;
+       size_t trail_len = (msg->trail ? msg->trail->length : 0);
 
        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
             con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
             con->out_msg_pos.page_pos);
 
-       while (con->out_msg_pos.page < con->out_msg->nr_pages) {
+#ifdef CONFIG_BLOCK
+       if (msg->bio && !msg->bio_iter)
+               init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
+#endif
+
+       while (data_len > con->out_msg_pos.data_pos) {
                struct page *page = NULL;
                void *kaddr = NULL;
+               int max_write = PAGE_SIZE;
+               int page_shift = 0;
+
+               total_max_write = data_len - trail_len -
+                       con->out_msg_pos.data_pos;
 
                /*
                 * if we are calculating the data crc (the default), we need
                 * to map the page.  if our pages[] has been revoked, use the
                 * zero page.
                 */
-               if (msg->pages) {
+
+               /* have we reached the trail part of the data? */
+               if (con->out_msg_pos.data_pos >= data_len - trail_len) {
+                       in_trail = 1;
+
+                       total_max_write = data_len - con->out_msg_pos.data_pos;
+
+                       page = list_first_entry(&msg->trail->head,
+                                               struct page, lru);
+                       if (crc)
+                               kaddr = kmap(page);
+                       max_write = PAGE_SIZE;
+               } else if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                        if (crc)
                                kaddr = kmap(page);
@@ -749,13 +812,25 @@ static int write_partial_msg_pages(struct ceph_connection *con)
                                                struct page, lru);
                        if (crc)
                                kaddr = kmap(page);
+#ifdef CONFIG_BLOCK
+               } else if (msg->bio) {
+                       struct bio_vec *bv;
+
+                       bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
+                       page = bv->bv_page;
+                       page_shift = bv->bv_offset;
+                       if (crc)
+                               kaddr = kmap(page) + page_shift;
+                       max_write = bv->bv_len;
+#endif
                } else {
                        page = con->msgr->zero_page;
                        if (crc)
                                kaddr = page_address(con->msgr->zero_page);
                }
-               len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
-                         (int)(data_len - con->out_msg_pos.data_pos));
+               len = min_t(int, max_write - con->out_msg_pos.page_pos,
+                           total_max_write);
+
                if (crc && !con->out_msg_pos.did_page_crc) {
                        void *base = kaddr + con->out_msg_pos.page_pos;
                        u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
@@ -765,13 +840,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
                                cpu_to_le32(crc32c(tmpcrc, base, len));
                        con->out_msg_pos.did_page_crc = 1;
                }
-
                ret = kernel_sendpage(con->sock, page,
-                                     con->out_msg_pos.page_pos, len,
+                                     con->out_msg_pos.page_pos + page_shift,
+                                     len,
                                      MSG_DONTWAIT | MSG_NOSIGNAL |
                                      MSG_MORE);
 
-               if (crc && (msg->pages || msg->pagelist))
+               if (crc &&
+                   (msg->pages || msg->pagelist || msg->bio || in_trail))
                        kunmap(page);
 
                if (ret <= 0)
@@ -783,9 +859,16 @@ static int write_partial_msg_pages(struct ceph_connection *con)
                        con->out_msg_pos.page_pos = 0;
                        con->out_msg_pos.page++;
                        con->out_msg_pos.did_page_crc = 0;
-                       if (msg->pagelist)
+                       if (in_trail)
+                               list_move_tail(&page->lru,
+                                              &msg->trail->head);
+                       else if (msg->pagelist)
                                list_move_tail(&page->lru,
                                               &msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+                       else if (msg->bio)
+                               iter_bio_next(&msg->bio_iter, &msg->bio_seg);
+#endif
                }
        }
 
@@ -938,7 +1021,7 @@ static int verify_hello(struct ceph_connection *con)
 {
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
-                      pr_addr(&con->peer_addr.in_addr));
+                      ceph_pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
@@ -1041,7 +1124,7 @@ int ceph_parse_ips(const char *c, const char *end,
 
                addr_set_port(ss, port);
 
-               dout("parse_ips got %s\n", pr_addr(ss));
+               dout("parse_ips got %s\n", ceph_pr_addr(ss));
 
                if (p == end)
                        break;
@@ -1061,6 +1144,7 @@ bad:
        pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return -EINVAL;
 }
+EXPORT_SYMBOL(ceph_parse_ips);
 
 static int process_banner(struct ceph_connection *con)
 {
@@ -1082,9 +1166,9 @@ static int process_banner(struct ceph_connection *con)
            !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
              con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
                pr_warning("wrong peer, want %s/%d, got %s/%d\n",
-                          pr_addr(&con->peer_addr.in_addr),
+                          ceph_pr_addr(&con->peer_addr.in_addr),
                           (int)le32_to_cpu(con->peer_addr.nonce),
-                          pr_addr(&con->actual_peer_addr.in_addr),
+                          ceph_pr_addr(&con->actual_peer_addr.in_addr),
                           (int)le32_to_cpu(con->actual_peer_addr.nonce));
                con->error_msg = "wrong peer at address";
                return -1;
@@ -1102,7 +1186,7 @@ static int process_banner(struct ceph_connection *con)
                addr_set_port(&con->msgr->inst.addr.in_addr, port);
                encode_my_addr(con->msgr);
                dout("process_banner learned my addr is %s\n",
-                    pr_addr(&con->msgr->inst.addr.in_addr));
+                    ceph_pr_addr(&con->msgr->inst.addr.in_addr));
        }
 
        set_bit(NEGOTIATING, &con->state);
@@ -1123,8 +1207,8 @@ static void fail_protocol(struct ceph_connection *con)
 
 static int process_connect(struct ceph_connection *con)
 {
-       u64 sup_feat = CEPH_FEATURE_SUPPORTED;
-       u64 req_feat = CEPH_FEATURE_REQUIRED;
+       u64 sup_feat = con->msgr->supported_features;
+       u64 req_feat = con->msgr->required_features;
        u64 server_feat = le64_to_cpu(con->in_reply.features);
 
        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1134,7 +1218,7 @@ static int process_connect(struct ceph_connection *con)
                pr_err("%s%lld %s feature set mismatch,"
                       " my %llx < server's %llx, missing %llx\n",
                       ENTITY_NAME(con->peer_name),
-                      pr_addr(&con->peer_addr.in_addr),
+                      ceph_pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
                fail_protocol(con);
@@ -1144,7 +1228,7 @@ static int process_connect(struct ceph_connection *con)
                pr_err("%s%lld %s protocol version mismatch,"
                       " my %d != server's %d\n",
                       ENTITY_NAME(con->peer_name),
-                      pr_addr(&con->peer_addr.in_addr),
+                      ceph_pr_addr(&con->peer_addr.in_addr),
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
@@ -1178,7 +1262,7 @@ static int process_connect(struct ceph_connection *con)
                     le32_to_cpu(con->in_connect.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
-                      pr_addr(&con->peer_addr.in_addr));
+                      ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
@@ -1223,7 +1307,7 @@ static int process_connect(struct ceph_connection *con)
                        pr_err("%s%lld %s protocol feature mismatch,"
                               " my required %llx > server's %llx, need %llx\n",
                               ENTITY_NAME(con->peer_name),
-                              pr_addr(&con->peer_addr.in_addr),
+                              ceph_pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
                        fail_protocol(con);
@@ -1305,8 +1389,7 @@ static int read_partial_message_section(struct ceph_connection *con,
                                        struct kvec *section,
                                        unsigned int sec_len, u32 *crc)
 {
-       int left;
-       int ret;
+       int ret, left;
 
        BUG_ON(!section);
 
@@ -1329,13 +1412,83 @@ static int read_partial_message_section(struct ceph_connection *con,
 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                                struct ceph_msg_header *hdr,
                                int *skip);
+
+
+static int read_partial_message_pages(struct ceph_connection *con,
+                                     struct page **pages,
+                                     unsigned data_len, int datacrc)
+{
+       void *p;
+       int ret;
+       int left;
+
+       left = min((int)(data_len - con->in_msg_pos.data_pos),
+                  (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
+       /* (page) data */
+       BUG_ON(pages == NULL);
+       p = kmap(pages[con->in_msg_pos.page]);
+       ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+                              left);
+       if (ret > 0 && datacrc)
+               con->in_data_crc =
+                       crc32c(con->in_data_crc,
+                                 p + con->in_msg_pos.page_pos, ret);
+       kunmap(pages[con->in_msg_pos.page]);
+       if (ret <= 0)
+               return ret;
+       con->in_msg_pos.data_pos += ret;
+       con->in_msg_pos.page_pos += ret;
+       if (con->in_msg_pos.page_pos == PAGE_SIZE) {
+               con->in_msg_pos.page_pos = 0;
+               con->in_msg_pos.page++;
+       }
+
+       return ret;
+}
+
+#ifdef CONFIG_BLOCK
+static int read_partial_message_bio(struct ceph_connection *con,
+                                   struct bio **bio_iter, int *bio_seg,
+                                   unsigned data_len, int datacrc)
+{
+       struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
+       void *p;
+       int ret, left;
+
+       if (IS_ERR(bv))
+               return PTR_ERR(bv);
+
+       left = min((int)(data_len - con->in_msg_pos.data_pos),
+                  (int)(bv->bv_len - con->in_msg_pos.page_pos));
+
+       p = kmap(bv->bv_page) + bv->bv_offset;
+
+       ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+                              left);
+       if (ret > 0 && datacrc)
+               con->in_data_crc =
+                       crc32c(con->in_data_crc,
+                                 p + con->in_msg_pos.page_pos, ret);
+       kunmap(bv->bv_page);
+       if (ret <= 0)
+               return ret;
+       con->in_msg_pos.data_pos += ret;
+       con->in_msg_pos.page_pos += ret;
+       if (con->in_msg_pos.page_pos == bv->bv_len) {
+               con->in_msg_pos.page_pos = 0;
+               iter_bio_next(bio_iter, bio_seg);
+       }
+
+       return ret;
+}
+#endif
+
 /*
  * read (part of) a message.
  */
 static int read_partial_message(struct ceph_connection *con)
 {
        struct ceph_msg *m = con->in_msg;
-       void *p;
        int ret;
        int to, left;
        unsigned front_len, middle_len, data_len, data_off;
@@ -1381,7 +1534,7 @@ static int read_partial_message(struct ceph_connection *con)
        if ((s64)seq - (s64)con->in_seq < 1) {
                pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
                        ENTITY_NAME(con->peer_name),
-                       pr_addr(&con->peer_addr.in_addr),
+                       ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
                        sizeof(m->footer);
@@ -1422,7 +1575,10 @@ static int read_partial_message(struct ceph_connection *con)
                        m->middle->vec.iov_len = 0;
 
                con->in_msg_pos.page = 0;
-               con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+               if (m->pages)
+                       con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+               else
+                       con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.data_pos = 0;
        }
 
@@ -1440,27 +1596,29 @@ static int read_partial_message(struct ceph_connection *con)
                if (ret <= 0)
                        return ret;
        }
+#ifdef CONFIG_BLOCK
+       if (m->bio && !m->bio_iter)
+               init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
+#endif
 
        /* (page) data */
        while (con->in_msg_pos.data_pos < data_len) {
-               left = min((int)(data_len - con->in_msg_pos.data_pos),
-                          (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
-               BUG_ON(m->pages == NULL);
-               p = kmap(m->pages[con->in_msg_pos.page]);
-               ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
-                                      left);
-               if (ret > 0 && datacrc)
-                       con->in_data_crc =
-                               crc32c(con->in_data_crc,
-                                         p + con->in_msg_pos.page_pos, ret);
-               kunmap(m->pages[con->in_msg_pos.page]);
-               if (ret <= 0)
-                       return ret;
-               con->in_msg_pos.data_pos += ret;
-               con->in_msg_pos.page_pos += ret;
-               if (con->in_msg_pos.page_pos == PAGE_SIZE) {
-                       con->in_msg_pos.page_pos = 0;
-                       con->in_msg_pos.page++;
+               if (m->pages) {
+                       ret = read_partial_message_pages(con, m->pages,
+                                                data_len, datacrc);
+                       if (ret <= 0)
+                               return ret;
+#ifdef CONFIG_BLOCK
+               } else if (m->bio) {
+                       ret = read_partial_message_bio(con,
+                                                &m->bio_iter, &m->bio_seg,
+                                                data_len, datacrc);
+                       if (ret <= 0)
+                               return ret;
+#endif
+               } else {
+                       BUG();
                }
        }
 
@@ -1874,9 +2032,9 @@ out:
 static void ceph_fault(struct ceph_connection *con)
 {
        pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
-              pr_addr(&con->peer_addr.in_addr), con->error_msg);
+              ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
        dout("fault %p state %lu to peer %s\n",
-            con, con->state, pr_addr(&con->peer_addr.in_addr));
+            con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
 
        if (test_bit(LOSSYTX, &con->state)) {
                dout("fault on LOSSYTX channel\n");
@@ -1936,7 +2094,9 @@ out:
 /*
  * create a new messenger instance
  */
-struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
+struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
+                                            u32 supported_features,
+                                            u32 required_features)
 {
        struct ceph_messenger *msgr;
 
@@ -1944,6 +2104,9 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
        if (msgr == NULL)
                return ERR_PTR(-ENOMEM);
 
+       msgr->supported_features = supported_features;
+       msgr->required_features = required_features;
+
        spin_lock_init(&msgr->global_seq_lock);
 
        /* the zero page is needed if a request is "canceled" while the message
@@ -1966,6 +2129,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
        dout("messenger_create %p\n", msgr);
        return msgr;
 }
+EXPORT_SYMBOL(ceph_messenger_create);
 
 void ceph_messenger_destroy(struct ceph_messenger *msgr)
 {
@@ -1975,6 +2139,7 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
        kfree(msgr);
        dout("destroyed messenger %p\n", msgr);
 }
+EXPORT_SYMBOL(ceph_messenger_destroy);
 
 /*
  * Queue up an outgoing message on the given connection.
@@ -2011,6 +2176,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
        if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_send);
 
 /*
  * Revoke a message that was previously queued for send
@@ -2076,6 +2242,7 @@ void ceph_con_keepalive(struct ceph_connection *con)
            test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
 }
+EXPORT_SYMBOL(ceph_con_keepalive);
 
 
 /*
@@ -2136,6 +2303,10 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
        m->nr_pages = 0;
        m->pages = NULL;
        m->pagelist = NULL;
+       m->bio = NULL;
+       m->bio_iter = NULL;
+       m->bio_seg = 0;
+       m->trail = NULL;
 
        dout("ceph_msg_new %p front %d\n", m, front_len);
        return m;
@@ -2146,6 +2317,7 @@ out:
        pr_err("msg_new can't create type %d front %d\n", type, front_len);
        return NULL;
 }
+EXPORT_SYMBOL(ceph_msg_new);
 
 /*
  * Allocate "middle" portion of a message, if it is needed and wasn't
@@ -2250,11 +2422,14 @@ void ceph_msg_last_put(struct kref *kref)
                m->pagelist = NULL;
        }
 
+       m->trail = NULL;
+
        if (m->pool)
                ceph_msgpool_put(m->pool, m);
        else
                ceph_msg_kfree(m);
 }
+EXPORT_SYMBOL(ceph_msg_last_put);
 
 void ceph_msg_dump(struct ceph_msg *msg)
 {
@@ -2275,3 +2450,4 @@ void ceph_msg_dump(struct ceph_msg *msg)
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->footer, sizeof(msg->footer), true);
 }
+EXPORT_SYMBOL(ceph_msg_dump);
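
Note the shape of the new messenger API: the feature masks that used to be the compile-time constants CEPH_FEATURE_SUPPORTED/CEPH_FEATURE_REQUIRED now travel with the messenger, so each libceph user picks its own bits at creation time and prepare_write_connect()/process_connect() negotiate with those. A sketch of a caller (the specific feature bit is illustrative):

    static struct ceph_messenger *example_msgr(struct ceph_entity_addr *myaddr)
    {
            struct ceph_messenger *msgr;

            msgr = ceph_messenger_create(myaddr,
                                         CEPH_FEATURE_NOSRCADDR, /* supported */
                                         0);                     /* required */
            if (IS_ERR(msgr))
                    return msgr;  /* ERR_PTR(-ENOMEM) on failure */

            /* every connection created from this msgr advertises
             * exactly these bits in its connect handshake */
            return msgr;
    }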
similarity index 94%
rename from fs/ceph/mon_client.c
rename to net/ceph/mon_client.c
index b2a5a3e4a671c336fc7495d473792ddc25f71110..8a079399174a27fc0c86e50e0891318cbb33cf97 100644 (file)
@@ -1,14 +1,16 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/sched.h>
 
-#include "mon_client.h"
-#include "super.h"
-#include "auth.h"
-#include "decode.h"
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/decode.h>
+
+#include <linux/ceph/auth.h>
 
 /*
  * Interact with Ceph monitor cluster.  Handle requests for new map
@@ -74,7 +76,7 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
             m->num_mon);
        for (i = 0; i < m->num_mon; i++)
                dout("monmap_decode  mon%d is %s\n", i,
-                    pr_addr(&m->mon_inst[i].addr.in_addr));
+                    ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
        return m;
 
 bad:
@@ -191,30 +193,33 @@ static void __send_subscribe(struct ceph_mon_client *monc)
                struct ceph_msg *msg = monc->m_subscribe;
                struct ceph_mon_subscribe_item *i;
                void *p, *end;
+               int num;
 
                p = msg->front.iov_base;
                end = p + msg->front_max;
 
-               dout("__send_subscribe to 'mdsmap' %u+\n",
-                    (unsigned)monc->have_mdsmap);
+               num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
+               ceph_encode_32(&p, num);
+
                if (monc->want_next_osdmap) {
                        dout("__send_subscribe to 'osdmap' %u\n",
                             (unsigned)monc->have_osdmap);
-                       ceph_encode_32(&p, 3);
                        ceph_encode_string(&p, end, "osdmap", 6);
                        i = p;
                        i->have = cpu_to_le64(monc->have_osdmap);
                        i->onetime = 1;
                        p += sizeof(*i);
                        monc->want_next_osdmap = 2;  /* requested */
-               } else {
-                       ceph_encode_32(&p, 2);
                }
-               ceph_encode_string(&p, end, "mdsmap", 6);
-               i = p;
-               i->have = cpu_to_le64(monc->have_mdsmap);
-               i->onetime = 0;
-               p += sizeof(*i);
+               if (monc->want_mdsmap) {
+                       dout("__send_subscribe to 'mdsmap' %u+\n",
+                            (unsigned)monc->have_mdsmap);
+                       ceph_encode_string(&p, end, "mdsmap", 6);
+                       i = p;
+                       i->have = cpu_to_le64(monc->have_mdsmap);
+                       i->onetime = 0;
+                       p += sizeof(*i);
+               }
                ceph_encode_string(&p, end, "monmap", 6);
                i = p;
                i->have = 0;
@@ -243,7 +248,8 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
        mutex_lock(&monc->mutex);
        if (monc->hunting) {
                pr_info("mon%d %s session established\n",
-                       monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
+                       monc->cur_mon,
+                       ceph_pr_addr(&monc->con->peer_addr.in_addr));
                monc->hunting = false;
        }
        dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -266,6 +272,7 @@ int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
        mutex_unlock(&monc->mutex);
        return 0;
 }
+EXPORT_SYMBOL(ceph_monc_got_mdsmap);
 
 int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
 {
@@ -310,6 +317,7 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
        mutex_unlock(&monc->mutex);
        return 0;
 }
+EXPORT_SYMBOL(ceph_monc_open_session);
 
 /*
  * The monitor responds with a mount ack to indicate mount success.  The
@@ -540,6 +548,7 @@ out:
        kref_put(&req->kref, release_generic_request);
        return err;
 }
+EXPORT_SYMBOL(ceph_monc_do_statfs);
 
 /*
  * pool ops
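ceph_monc_do_statfs(), exported above, is the synchronous generic-request path: register a request by tid, send it to the current monitor, and sleep until the matching reply completes it. From a module the whole exchange reduces to the sketch below (client is an assumed ceph_client; the ceph_statfs fields are __le64 on the wire):

    static int example_statfs(struct ceph_client *client, u64 *kb_avail)
    {
            struct ceph_statfs st;
            int err;

            err = ceph_monc_do_statfs(&client->monc, &st);
            if (err < 0)
                    return err;

            *kb_avail = le64_to_cpu(st.kb_avail);
            return 0;
    }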
@@ -651,6 +660,7 @@ int ceph_monc_create_snapid(struct ceph_mon_client *monc,
                                   pool, 0, (char *)snapid, sizeof(*snapid));
 
 }
+EXPORT_SYMBOL(ceph_monc_create_snapid);
 
 int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
                            u32 pool, u64 snapid)
@@ -708,9 +718,9 @@ static void delayed_work(struct work_struct *work)
  */
 static int build_initial_monmap(struct ceph_mon_client *monc)
 {
-       struct ceph_mount_args *args = monc->client->mount_args;
-       struct ceph_entity_addr *mon_addr = args->mon_addr;
-       int num_mon = args->num_mon;
+       struct ceph_options *opt = monc->client->options;
+       struct ceph_entity_addr *mon_addr = opt->mon_addr;
+       int num_mon = opt->num_mon;
        int i;
 
        /* build initial monmap */
@@ -728,11 +738,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc)
        }
        monc->monmap->num_mon = num_mon;
        monc->have_fsid = false;
-
-       /* release addr memory */
-       kfree(args->mon_addr);
-       args->mon_addr = NULL;
-       args->num_mon = 0;
        return 0;
 }
 
@@ -753,8 +758,8 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
        monc->con = NULL;
 
        /* authentication */
-       monc->auth = ceph_auth_init(cl->mount_args->name,
-                                   cl->mount_args->secret);
+       monc->auth = ceph_auth_init(cl->options->name,
+                                   cl->options->secret);
        if (IS_ERR(monc->auth))
                return PTR_ERR(monc->auth);
        monc->auth->want_keys =
@@ -808,6 +813,7 @@ out_monmap:
 out:
        return err;
 }
+EXPORT_SYMBOL(ceph_monc_init);
 
 void ceph_monc_stop(struct ceph_mon_client *monc)
 {
@@ -832,6 +838,7 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
        kfree(monc->monmap);
 }
+EXPORT_SYMBOL(ceph_monc_stop);
 
 static void handle_auth_reply(struct ceph_mon_client *monc,
                              struct ceph_msg *msg)
@@ -889,6 +896,7 @@ int ceph_monc_validate_auth(struct ceph_mon_client *monc)
        mutex_unlock(&monc->mutex);
        return ret;
 }
+EXPORT_SYMBOL(ceph_monc_validate_auth);
 
 /*
  * handle incoming message
@@ -922,15 +930,16 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                ceph_monc_handle_map(monc, msg);
                break;
 
-       case CEPH_MSG_MDS_MAP:
-               ceph_mdsc_handle_map(&monc->client->mdsc, msg);
-               break;
-
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(&monc->client->osdc, msg);
                break;
 
        default:
+               /* can the chained handler handle it? */
+               if (monc->client->extra_mon_dispatch &&
+                   monc->client->extra_mon_dispatch(monc->client, msg) == 0)
+                       break;
+
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
@@ -994,7 +1003,7 @@ static void mon_fault(struct ceph_connection *con)
        if (monc->con && !monc->hunting)
                pr_info("mon%d %s session lost, "
                        "hunting for new mon\n", monc->cur_mon,
-                       pr_addr(&monc->con->peer_addr.in_addr));
+                       ceph_pr_addr(&monc->con->peer_addr.in_addr));
 
        __close_session(monc);
        if (!monc->hunting) {
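
The CEPH_MSG_MDS_MAP case deleted from dispatch() above now reaches the filesystem through the new extra_mon_dispatch hook: the generic client offers unknown types to the chained handler and only logs an error when that handler returns non-zero. A sketch of the consumer side, assuming the hook's signature matches its call site above (example_handle_mdsmap is illustrative):

    static int example_mon_dispatch(struct ceph_client *client,
                                    struct ceph_msg *msg)
    {
            switch (le16_to_cpu(msg->hdr.type)) {
            case CEPH_MSG_MDS_MAP:
                    example_handle_mdsmap(client, msg); /* fs-level handler */
                    return 0;   /* handled: dispatch() breaks out */
            default:
                    return -1;  /* not ours: mon_client logs it */
            }
    }

    /* at client setup time: */
    client->extra_mon_dispatch = example_mon_dispatch;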
similarity index 95%
rename from fs/ceph/msgpool.c
rename to net/ceph/msgpool.c
index dd65a6438131c5f08cbe97085b6b1ba3f671076d..d5f2d97ac05caf40124fe4f628efe77a514d8e55 100644 (file)
@@ -1,11 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
-#include "msgpool.h"
+#include <linux/ceph/msgpool.h>
 
 static void *alloc_fn(gfp_t gfp_mask, void *arg)
 {
similarity index 84%
rename from fs/ceph/osd_client.c
rename to net/ceph/osd_client.c
index dfced1dacbcdcb47178f2c9676275869c2105296..79391994b3edee7d531b823c0720262090db6618 100644 (file)
@@ -1,17 +1,22 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#ifdef CONFIG_BLOCK
+#include <linux/bio.h>
+#endif
 
-#include "super.h"
-#include "osd_client.h"
-#include "messenger.h"
-#include "decode.h"
-#include "auth.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
 
 #define OSD_OP_FRONT_LEN       4096
 #define OSD_OPREPLY_FRONT_LEN  512
@@ -22,6 +27,59 @@ static int __kick_requests(struct ceph_osd_client *osdc,
 
 static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
 
+static int op_needs_trail(int op)
+{
+       switch (op) {
+       case CEPH_OSD_OP_GETXATTR:
+       case CEPH_OSD_OP_SETXATTR:
+       case CEPH_OSD_OP_CMPXATTR:
+       case CEPH_OSD_OP_CALL:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int op_has_extent(int op)
+{
+       return (op == CEPH_OSD_OP_READ ||
+               op == CEPH_OSD_OP_WRITE);
+}
+
+void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+                       struct ceph_file_layout *layout,
+                       u64 snapid,
+                       u64 off, u64 *plen, u64 *bno,
+                       struct ceph_osd_request *req,
+                       struct ceph_osd_req_op *op)
+{
+       struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
+       u64 orig_len = *plen;
+       u64 objoff, objlen;    /* extent in object */
+
+       reqhead->snapid = cpu_to_le64(snapid);
+
+       /* object extent? */
+       ceph_calc_file_object_mapping(layout, off, plen, bno,
+                                     &objoff, &objlen);
+       if (*plen < orig_len)
+               dout(" skipping last %llu, final file extent %llu~%llu\n",
+                    orig_len - *plen, off, *plen);
+
+       if (op_has_extent(op->op)) {
+               op->extent.offset = objoff;
+               op->extent.length = objlen;
+       }
+       req->r_num_pages = calc_pages_for(off, *plen);
+       if (op->op == CEPH_OSD_OP_WRITE)
+               op->payload_len = *plen;
+
+       dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
+            *bno, objoff, objlen, req->r_num_pages);
+}
+EXPORT_SYMBOL(ceph_calc_raw_layout);
+
 /*
  * Implement client access to distributed object storage cluster.
  *
@@ -48,34 +106,19 @@ static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
  * fill osd op in request message.
  */
 static void calc_layout(struct ceph_osd_client *osdc,
-                       struct ceph_vino vino, struct ceph_file_layout *layout,
+                       struct ceph_vino vino,
+                       struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
-                       struct ceph_osd_request *req)
+                       struct ceph_osd_request *req,
+                       struct ceph_osd_req_op *op)
 {
-       struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
-       struct ceph_osd_op *op = (void *)(reqhead + 1);
-       u64 orig_len = *plen;
-       u64 objoff, objlen;    /* extent in object */
        u64 bno;
 
-       reqhead->snapid = cpu_to_le64(vino.snap);
-
-       /* object extent? */
-       ceph_calc_file_object_mapping(layout, off, plen, &bno,
-                                     &objoff, &objlen);
-       if (*plen < orig_len)
-               dout(" skipping last %llu, final file extent %llu~%llu\n",
-                    orig_len - *plen, off, *plen);
+       ceph_calc_raw_layout(osdc, layout, vino.snap, off,
+                            plen, &bno, req, op);
 
        sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);
-
-       op->extent.offset = cpu_to_le64(objoff);
-       op->extent.length = cpu_to_le64(objlen);
-       req->r_num_pages = calc_pages_for(off, *plen);
-
-       dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
-            req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
 }
 
 /*
@@ -101,56 +144,66 @@ void ceph_osdc_release_request(struct kref *kref)
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
+#ifdef CONFIG_BLOCK
+       if (req->r_bio)
+               bio_put(req->r_bio);
+#endif
        ceph_put_snap_context(req->r_snapc);
+       if (req->r_trail) {
+               ceph_pagelist_release(req->r_trail);
+               kfree(req->r_trail);
+       }
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else
                kfree(req);
 }
+EXPORT_SYMBOL(ceph_osdc_release_request);
 
-/*
- * build new request AND message, calculate layout, and adjust file
- * extent as needed.
- *
- * if the file was recently truncated, we include information about its
- * old and new size so that the object can be updated appropriately.  (we
- * avoid synchronously deleting truncated objects because it's slow.)
- *
- * if @do_sync, include a 'startsync' command so that the osd will flush
- * data quickly.
- */
-struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
-                                              struct ceph_file_layout *layout,
-                                              struct ceph_vino vino,
-                                              u64 off, u64 *plen,
-                                              int opcode, int flags,
+static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
+{
+       int i = 0;
+
+       if (needs_trail)
+               *needs_trail = 0;
+       while (ops[i].op) {
+               if (needs_trail && op_needs_trail(ops[i].op))
+                       *needs_trail = 1;
+               i++;
+       }
+
+       return i;
+}
+
+struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+                                              int flags,
                                               struct ceph_snap_context *snapc,
-                                              int do_sync,
-                                              u32 truncate_seq,
-                                              u64 truncate_size,
-                                              struct timespec *mtime,
-                                              bool use_mempool, int num_reply)
+                                              struct ceph_osd_req_op *ops,
+                                              bool use_mempool,
+                                              gfp_t gfp_flags,
+                                              struct page **pages,
+                                              struct bio *bio)
 {
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
-       struct ceph_osd_request_head *head;
-       struct ceph_osd_op *op;
-       void *p;
-       int num_op = 1 + do_sync;
-       size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
-       int i;
+       int needs_trail;
+       int num_op = get_num_ops(ops, &needs_trail);
+       size_t msg_size = sizeof(struct ceph_osd_request_head);
+
+       msg_size += num_op*sizeof(struct ceph_osd_op);
 
        if (use_mempool) {
-               req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
+               req = mempool_alloc(osdc->req_mempool, gfp_flags);
                memset(req, 0, sizeof(*req));
        } else {
-               req = kzalloc(sizeof(*req), GFP_NOFS);
+               req = kzalloc(sizeof(*req), gfp_flags);
        }
        if (req == NULL)
                return NULL;
 
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
+
        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
@@ -164,13 +217,22 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
-                                  OSD_OPREPLY_FRONT_LEN, GFP_NOFS);
+                                  OSD_OPREPLY_FRONT_LEN, gfp_flags);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }
        req->r_reply = msg;
 
+       /* allocate space for the trailing data */
+       if (needs_trail) {
+               req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
+               if (!req->r_trail) {
+                       ceph_osdc_put_request(req);
+                       return NULL;
+               }
+               ceph_pagelist_init(req->r_trail);
+       }
        /* create request message; allow space for oid */
        msg_size += 40;
        if (snapc)
@@ -178,18 +240,115 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
-               msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, GFP_NOFS);
+               msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
        if (!msg) {
                ceph_osdc_put_request(req);
                return NULL;
        }
+
        msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
        memset(msg->front.iov_base, 0, msg->front.iov_len);
+
+       req->r_request = msg;
+       req->r_pages = pages;
+#ifdef CONFIG_BLOCK
+       if (bio) {
+               req->r_bio = bio;
+               bio_get(req->r_bio);
+       }
+#endif
+
+       return req;
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_request);
+
+static void osd_req_encode_op(struct ceph_osd_request *req,
+                             struct ceph_osd_op *dst,
+                             struct ceph_osd_req_op *src)
+{
+       dst->op = cpu_to_le16(src->op);
+
+       switch (dst->op) {
+       case CEPH_OSD_OP_READ:
+       case CEPH_OSD_OP_WRITE:
+               dst->extent.offset =
+                       cpu_to_le64(src->extent.offset);
+               dst->extent.length =
+                       cpu_to_le64(src->extent.length);
+               dst->extent.truncate_size =
+                       cpu_to_le64(src->extent.truncate_size);
+               dst->extent.truncate_seq =
+                       cpu_to_le32(src->extent.truncate_seq);
+               break;
+
+       case CEPH_OSD_OP_GETXATTR:
+       case CEPH_OSD_OP_SETXATTR:
+       case CEPH_OSD_OP_CMPXATTR:
+               BUG_ON(!req->r_trail);
+
+               dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+               dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+               dst->xattr.cmp_op = src->xattr.cmp_op;
+               dst->xattr.cmp_mode = src->xattr.cmp_mode;
+               ceph_pagelist_append(req->r_trail, src->xattr.name,
+                                    src->xattr.name_len);
+               ceph_pagelist_append(req->r_trail, src->xattr.val,
+                                    src->xattr.value_len);
+               break;
+       case CEPH_OSD_OP_CALL:
+               BUG_ON(!req->r_trail);
+
+               dst->cls.class_len = src->cls.class_len;
+               dst->cls.method_len = src->cls.method_len;
+               dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
+
+               ceph_pagelist_append(req->r_trail, src->cls.class_name,
+                                    src->cls.class_len);
+               ceph_pagelist_append(req->r_trail, src->cls.method_name,
+                                    src->cls.method_len);
+               ceph_pagelist_append(req->r_trail, src->cls.indata,
+                                    src->cls.indata_len);
+               break;
+       case CEPH_OSD_OP_ROLLBACK:
+               dst->snap.snapid = cpu_to_le64(src->snap.snapid);
+               break;
+       case CEPH_OSD_OP_STARTSYNC:
+               break;
+       default:
+               pr_err("unrecognized osd opcode %d\n", dst->op);
+               WARN_ON(1);
+               break;
+       }
+       dst->payload_len = cpu_to_le32(src->payload_len);
+}
+
+/*
+ * build the request message; the request itself must already have
+ * been set up by ceph_osdc_alloc_request()
+ */
+void ceph_osdc_build_request(struct ceph_osd_request *req,
+                            u64 off, u64 *plen,
+                            struct ceph_osd_req_op *src_ops,
+                            struct ceph_snap_context *snapc,
+                            struct timespec *mtime,
+                            const char *oid,
+                            int oid_len)
+{
+       struct ceph_msg *msg = req->r_request;
+       struct ceph_osd_request_head *head;
+       struct ceph_osd_req_op *src_op;
+       struct ceph_osd_op *op;
+       void *p;
+       int num_op = get_num_ops(src_ops, NULL);
+       size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+       int flags = req->r_flags;
+       u64 data_len = 0;
+       int i;
+
        head = msg->front.iov_base;
        op = (void *)(head + 1);
        p = (void *)(op + num_op);
 
-       req->r_request = msg;
        req->r_snapc = ceph_get_snap_context(snapc);
 
        head->client_inc = cpu_to_le32(1); /* always, for now. */
@@ -197,29 +356,23 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
        if (flags & CEPH_OSD_FLAG_WRITE)
                ceph_encode_timespec(&head->mtime, mtime);
        head->num_ops = cpu_to_le16(num_op);
-       op->op = cpu_to_le16(opcode);
 
-       /* calculate max write size */
-       calc_layout(osdc, vino, layout, off, plen, req);
-       req->r_file_layout = *layout;  /* keep a copy */
-
-       if (flags & CEPH_OSD_FLAG_WRITE) {
-               req->r_request->hdr.data_off = cpu_to_le16(off);
-               req->r_request->hdr.data_len = cpu_to_le32(*plen);
-               op->payload_len = cpu_to_le32(*plen);
-       }
-       op->extent.truncate_size = cpu_to_le64(truncate_size);
-       op->extent.truncate_seq = cpu_to_le32(truncate_seq);
 
        /* fill in oid */
-       head->object_len = cpu_to_le32(req->r_oid_len);
-       memcpy(p, req->r_oid, req->r_oid_len);
-       p += req->r_oid_len;
-
-       if (do_sync) {
+       head->object_len = cpu_to_le32(oid_len);
+       memcpy(p, oid, oid_len);
+       p += oid_len;
+
+       src_op = src_ops;
+       while (src_op->op) {
+               osd_req_encode_op(req, op, src_op);
+               src_op++;
                op++;
-               op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
        }
+
+       if (req->r_trail)
+               data_len += req->r_trail->length;
+
        if (snapc) {
                head->snap_seq = cpu_to_le64(snapc->seq);
                head->num_snaps = cpu_to_le32(snapc->num_snaps);
@@ -229,12 +382,79 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                }
        }
 
+       if (flags & CEPH_OSD_FLAG_WRITE) {
+               req->r_request->hdr.data_off = cpu_to_le16(off);
+               req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
+       } else if (data_len) {
+               req->r_request->hdr.data_off = 0;
+               req->r_request->hdr.data_len = cpu_to_le32(data_len);
+       }
+
        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        msg_size = p - msg->front.iov_base;
        msg->front.iov_len = msg_size;
        msg->hdr.front_len = cpu_to_le32(msg_size);
+       return;
+}
+EXPORT_SYMBOL(ceph_osdc_build_request);
+
+/*
+ * build new request AND message, calculate layout, and adjust file
+ * extent as needed.
+ *
+ * if the file was recently truncated, we include information about its
+ * old and new size so that the object can be updated appropriately.  (we
+ * avoid synchronously deleting truncated objects because it's slow.)
+ *
+ * if @do_sync, include a 'startsync' command so that the osd will flush
+ * data quickly.
+ */
+struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+                                              struct ceph_file_layout *layout,
+                                              struct ceph_vino vino,
+                                              u64 off, u64 *plen,
+                                              int opcode, int flags,
+                                              struct ceph_snap_context *snapc,
+                                              int do_sync,
+                                              u32 truncate_seq,
+                                              u64 truncate_size,
+                                              struct timespec *mtime,
+                                              bool use_mempool, int num_reply)
+{
+       struct ceph_osd_req_op ops[3];
+       struct ceph_osd_request *req;
+
+       ops[0].op = opcode;
+       ops[0].extent.truncate_seq = truncate_seq;
+       ops[0].extent.truncate_size = truncate_size;
+       ops[0].payload_len = 0;
+
+       if (do_sync) {
+               ops[1].op = CEPH_OSD_OP_STARTSYNC;
+               ops[1].payload_len = 0;
+               ops[2].op = 0;
+       } else
+               ops[1].op = 0;
+
+       req = ceph_osdc_alloc_request(osdc, flags,
+                                        snapc, ops,
+                                        use_mempool,
+                                        GFP_NOFS, NULL, NULL);
+       if (!req)
+               return NULL;
+
+       /* calculate max write size */
+       calc_layout(osdc, vino, layout, off, plen, req, ops);
+       req->r_file_layout = *layout;  /* keep a copy */
+
+       ceph_osdc_build_request(req, off, plen, ops,
+                               snapc,
+                               mtime,
+                               req->r_oid, req->r_oid_len);
+
        return req;
 }
+EXPORT_SYMBOL(ceph_osdc_new_request);
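
ceph_osdc_new_request() above doubles as the usage example for the split API: describe the compound operation as a zero-terminated ceph_osd_req_op array, let ceph_osdc_alloc_request() size the message via get_num_ops(), then encode with ceph_osdc_build_request(). Ops that op_needs_trail() flags (the xattr ops and CALL) carry their variable-length payload in the r_trail pagelist, appended by osd_req_encode_op(). A sketch of a single CALL op; the class/method names are illustrative, and the payload_len convention (class + method + indata bytes, which end up in the trail) is an assumption of this sketch:

    struct ceph_osd_req_op ops[2];

    ops[0].op = CEPH_OSD_OP_CALL;
    ops[0].cls.class_name = "foo";
    ops[0].cls.class_len = 3;
    ops[0].cls.method_name = "bar";
    ops[0].cls.method_len = 3;
    ops[0].cls.indata = NULL;
    ops[0].cls.indata_len = 0;
    ops[0].payload_len = ops[0].cls.class_len + ops[0].cls.method_len;
    ops[1].op = 0;  /* terminator: get_num_ops() stops here */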
 
 /*
  * We keep osd requests in an rbtree, sorted by ->r_tid.
@@ -389,7 +609,7 @@ static void __move_osd_to_lru(struct ceph_osd_client *osdc,
        dout("__move_osd_to_lru %p\n", osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
-       osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ;
+       osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
 }
 
 static void __remove_osd_from_lru(struct ceph_osd *osd)
@@ -483,7 +703,7 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
 static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
 {
        schedule_delayed_work(&osdc->timeout_work,
-                       osdc->client->mount_args->osd_keepalive_timeout * HZ);
+                       osdc->client->options->osd_keepalive_timeout * HZ);
 }
 
 static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
@@ -549,7 +769,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
  */
 static void __cancel_request(struct ceph_osd_request *req)
 {
-       if (req->r_sent) {
+       if (req->r_sent && req->r_osd) {
                ceph_con_revoke(&req->r_osd->o_con, req->r_request);
                req->r_sent = 0;
        }
@@ -684,9 +904,9 @@ static void handle_timeout(struct work_struct *work)
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_osd_request *req, *last_req = NULL;
        struct ceph_osd *osd;
-       unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
+       unsigned long timeout = osdc->client->options->osd_timeout * HZ;
        unsigned long keepalive =
-               osdc->client->mount_args->osd_keepalive_timeout * HZ;
+               osdc->client->options->osd_keepalive_timeout * HZ;
        unsigned long last_stamp = 0;
        struct rb_node *p;
        struct list_head slow_osds;
@@ -773,7 +993,7 @@ static void handle_osds_timeout(struct work_struct *work)
                container_of(work, struct ceph_osd_client,
                             osds_timeout_work.work);
        unsigned long delay =
-               osdc->client->mount_args->osd_idle_ttl * HZ >> 2;
+               osdc->client->options->osd_idle_ttl * HZ >> 2;
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
@@ -1104,6 +1324,10 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 
        req->r_request->pages = req->r_pages;
        req->r_request->nr_pages = req->r_num_pages;
+#ifdef CONFIG_BLOCK
+       req->r_request->bio = req->r_bio;
+#endif
+       req->r_request->trail = req->r_trail;
 
        register_request(osdc, req);
 
@@ -1131,6 +1355,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
        up_read(&osdc->map_sem);
        return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_start_request);
 
 /*
  * wait for a request to complete
@@ -1153,6 +1378,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
        dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
        return req->r_result;
 }
+EXPORT_SYMBOL(ceph_osdc_wait_request);
 
 /*
  * sync - wait for all in-flight requests to flush.  avoid starvation.
@@ -1186,6 +1412,7 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
 }
+EXPORT_SYMBOL(ceph_osdc_sync);
 
 /*
  * init, shutdown
@@ -1211,7 +1438,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
 
        schedule_delayed_work(&osdc->osds_timeout_work,
-          round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ));
+          round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
 
        err = -ENOMEM;
        osdc->req_mempool = mempool_create_kmalloc_pool(10,
@@ -1237,6 +1464,7 @@ out_mempool:
 out:
        return err;
 }
+EXPORT_SYMBOL(ceph_osdc_init);
 
 void ceph_osdc_stop(struct ceph_osd_client *osdc)
 {
@@ -1251,6 +1479,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
 }
+EXPORT_SYMBOL(ceph_osdc_stop);
 
 /*
  * Read some contiguous pages.  If we cross a stripe boundary, shorten
@@ -1288,6 +1517,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
        dout("readpages result %d\n", rc);
        return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_readpages);
 
 /*
  * do a synchronous write on N pages
@@ -1330,6 +1560,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
        dout("writepages result %d\n", rc);
        return rc;
 }
+EXPORT_SYMBOL(ceph_osdc_writepages);
 
 /*
  * handle incoming message
@@ -1420,6 +1651,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                }
                m->pages = req->r_pages;
                m->nr_pages = req->r_num_pages;
+#ifdef CONFIG_BLOCK
+               m->bio = req->r_bio;
+#endif
        }
        *skip = 0;
        req->r_con_filling_msg = ceph_con_get(con);
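
Taken together, the exports give external modules the same request lifecycle the readpages/writepages helpers use internally: build a request, attach the data pages, start it, wait, then drop the kref. A sketch of the read case (error handling minimal; pages must cover req->r_num_pages):

    static int example_read(struct ceph_osd_client *osdc,
                            struct ceph_file_layout *layout,
                            struct ceph_vino vino, u64 off, u64 len,
                            struct page **pages)
    {
            struct ceph_osd_request *req;
            int rc;

            req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                        CEPH_OSD_OP_READ,
                                        CEPH_OSD_FLAG_READ,
                                        NULL, 0, 0, 0, NULL, false, 1);
            if (!req)
                    return -ENOMEM;

            req->r_pages = pages;   /* reply data lands here */

            rc = ceph_osdc_start_request(osdc, req, false);
            if (!rc)
                    rc = ceph_osdc_wait_request(osdc, req);
            ceph_osdc_put_request(req);
            return rc;
    }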
similarity index 97%
rename from fs/ceph/osdmap.c
rename to net/ceph/osdmap.c
index e31f118f1392988351257d2385863c37228bc7df..d73f3f6efa36ff6cf9efc33cada1853afa8073e7 100644 (file)
@@ -1,14 +1,15 @@
 
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <asm/div64.h>
 
-#include "super.h"
-#include "osdmap.h"
-#include "crush/hash.h"
-#include "crush/mapper.h"
-#include "decode.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/decode.h>
+#include <linux/crush/hash.h>
+#include <linux/crush/mapper.h>
 
 char *ceph_osdmap_state_str(char *str, int len, int state)
 {
@@ -417,6 +418,20 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
        return NULL;
 }
 
+int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
+{
+       struct rb_node *rbp;
+
+       for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
+               struct ceph_pg_pool_info *pi =
+                       rb_entry(rbp, struct ceph_pg_pool_info, node);
+               if (pi->name && strcmp(pi->name, name) == 0)
+                       return pi->id;
+       }
+       return -ENOENT;
+}
+EXPORT_SYMBOL(ceph_pg_poolid_by_name);
+
 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 {
        rb_erase(&pi->node, root);
@@ -966,6 +981,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 
        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
 }
+EXPORT_SYMBOL(ceph_calc_file_object_mapping);
 
 /*
  * calculate an object layout (i.e. pgid) from an oid,
@@ -1011,6 +1027,7 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
        return 0;
 }
+EXPORT_SYMBOL(ceph_calc_object_layout);
 
 /*
  * Calculate raw osd vector for the given pgid.  Return pointer to osd
@@ -1108,3 +1125,4 @@ int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
                        return osds[i];
        return -1;
 }
+EXPORT_SYMBOL(ceph_calc_pg_primary);
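
Among the newly exported helpers above, ceph_pg_poolid_by_name() is a plain name-to-id scan over the pool entries. A minimal userspace sketch of the same lookup, with the rbtree walk replaced by an array (pool names and values here are illustrative, not from the patch):

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct pool { int id; const char *name; };

/* walk the entries, match on name, return the id or -ENOENT */
static int poolid_by_name(const struct pool *p, int n, const char *name)
{
    for (int i = 0; i < n; i++)
        if (p[i].name && strcmp(p[i].name, name) == 0)
            return p[i].id;
    return -ENOENT;
}

int main(void)
{
    struct pool pools[] = { { 0, "data" }, { 1, "metadata" }, { 2, "rbd" } };

    printf("rbd  -> %d\n", poolid_by_name(pools, 3, "rbd"));
    printf("nope -> %d\n", poolid_by_name(pools, 3, "nope"));
    return 0;
}

Returning -ENOENT on a miss follows the kernel convention of encoding errors as negative errno values in the integer return.
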
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
new file mode 100644 (file)
index 0000000..13cb409
--- /dev/null
@@ -0,0 +1,154 @@
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/ceph/pagelist.h>
+
+static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
+{
+       if (pl->mapped_tail) {
+               struct page *page = list_entry(pl->head.prev, struct page, lru);
+               kunmap(page);
+               pl->mapped_tail = NULL;
+       }
+}
+
+int ceph_pagelist_release(struct ceph_pagelist *pl)
+{
+       ceph_pagelist_unmap_tail(pl);
+       while (!list_empty(&pl->head)) {
+               struct page *page = list_first_entry(&pl->head, struct page,
+                                                    lru);
+               list_del(&page->lru);
+               __free_page(page);
+       }
+       ceph_pagelist_free_reserve(pl);
+       return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_release);
+
+static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
+{
+       struct page *page;
+
+       if (!pl->num_pages_free) {
+               page = __page_cache_alloc(GFP_NOFS);
+       } else {
+               page = list_first_entry(&pl->free_list, struct page, lru);
+               list_del(&page->lru);
+               --pl->num_pages_free;
+       }
+       if (!page)
+               return -ENOMEM;
+       pl->room += PAGE_SIZE;
+       ceph_pagelist_unmap_tail(pl);
+       list_add_tail(&page->lru, &pl->head);
+       pl->mapped_tail = kmap(page);
+       return 0;
+}
+
+int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
+{
+       while (pl->room < len) {
+               size_t bit = pl->room;
+               int ret;
+
+               memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+                      buf, bit);
+               pl->length += bit;
+               pl->room -= bit;
+               buf += bit;
+               len -= bit;
+               ret = ceph_pagelist_addpage(pl);
+               if (ret)
+                       return ret;
+       }
+
+       memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+       pl->length += len;
+       pl->room -= len;
+       return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_append);
+
+/**
+ * Allocate enough pages for a pagelist to append the given amount
+ * of data without further allocation.
+ * Returns: 0 on success, -ENOMEM on error.
+ */
+int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
+{
+       if (space <= pl->room)
+               return 0;
+       space -= pl->room;
+       space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT;   /* conv to num pages */
+
+       while (space > pl->num_pages_free) {
+               struct page *page = __page_cache_alloc(GFP_NOFS);
+               if (!page)
+                       return -ENOMEM;
+               list_add_tail(&page->lru, &pl->free_list);
+               ++pl->num_pages_free;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_reserve);
+
+/**
+ * Free any pages that have been preallocated.
+ */
+int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
+{
+       while (!list_empty(&pl->free_list)) {
+               struct page *page = list_first_entry(&pl->free_list,
+                                                    struct page, lru);
+               list_del(&page->lru);
+               __free_page(page);
+               --pl->num_pages_free;
+       }
+       BUG_ON(pl->num_pages_free);
+       return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_free_reserve);
+
+/**
+ * Create a truncation point.
+ */
+void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+                             struct ceph_pagelist_cursor *c)
+{
+       c->pl = pl;
+       c->page_lru = pl->head.prev;
+       c->room = pl->room;
+}
+EXPORT_SYMBOL(ceph_pagelist_set_cursor);
+
+/**
+ * Truncate a pagelist to the given point. Move extra pages to reserve.
+ * This won't sleep.
+ * Returns: 0 on success,
+ *          -EINVAL if the pagelist doesn't match the trunc point pagelist
+ */
+int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+                          struct ceph_pagelist_cursor *c)
+{
+       struct page *page;
+
+       if (pl != c->pl)
+               return -EINVAL;
+       ceph_pagelist_unmap_tail(pl);
+       while (pl->head.prev != c->page_lru) {
+               page = list_entry(pl->head.prev, struct page, lru);
+               list_del(&page->lru);                /* remove from pagelist */
+               list_add_tail(&page->lru, &pl->free_list); /* add to reserve */
+               ++pl->num_pages_free;
+       }
+       pl->room = c->room;
+       if (!list_empty(&pl->head)) {
+               page = list_entry(pl->head.prev, struct page, lru);
+               pl->mapped_tail = kmap(page);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_truncate);
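
The new pagelist is an append-only buffer built from kmapped pages: room tracks the free bytes in the tail page, append() fills the tail and grabs a fresh page when it runs out, and the cursor/truncate pair lets a caller roll back a partial append (extra pages move to the reserve list rather than being freed, which is why truncation never sleeps). A compact userspace model of the append/room bookkeeping, assuming nothing beyond the logic visible above (BLK, MAXBLK and the fixed block table are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLK 8                     /* tiny "page" so the rollover is visible */
#define MAXBLK 16

struct pagelist {
    char *blocks[MAXBLK];
    int nblocks;
    size_t length;                /* total bytes appended */
    size_t room;                  /* free bytes in the tail block */
};

static int addblock(struct pagelist *pl)
{
    char *b;

    if (pl->nblocks == MAXBLK || !(b = malloc(BLK)))
        return -1;
    pl->blocks[pl->nblocks++] = b;
    pl->room += BLK;
    return 0;
}

static int append(struct pagelist *pl, const char *buf, size_t len)
{
    while (pl->room < len) {      /* fill the tail, then add a block */
        size_t bit = pl->room;

        if (bit) {
            memcpy(pl->blocks[pl->nblocks - 1] + BLK - bit, buf, bit);
            pl->length += bit;
            pl->room = 0;
            buf += bit;
            len -= bit;
        }
        if (addblock(pl))
            return -1;            /* a real caller could truncate here */
    }
    if (len) {
        memcpy(pl->blocks[pl->nblocks - 1] + BLK - pl->room, buf, len);
        pl->length += len;
        pl->room -= len;
    }
    return 0;
}

int main(void)
{
    struct pagelist pl = { { 0 }, 0, 0, 0 };

    if (append(&pl, "hello, pagelist", 15))
        return 1;
    printf("length=%zu blocks=%d room=%zu\n", pl.length, pl.nblocks, pl.room);
    while (pl.nblocks)
        free(pl.blocks[--pl.nblocks]);
    return 0;
}

With BLK = 8, the 15-byte append lands in two blocks with one byte of room left, mirroring how ceph_pagelist_append() spills across page boundaries.
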
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
new file mode 100644 (file)
index 0000000..54caf06
--- /dev/null
@@ -0,0 +1,223 @@
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/writeback.h>
+
+#include <linux/ceph/libceph.h>
+
+/*
+ * build a vector of user pages
+ */
+struct page **ceph_get_direct_page_vector(const char __user *data,
+                                                int num_pages,
+                                                loff_t off, size_t len)
+{
+       struct page **pages;
+       int rc;
+
+       pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       down_read(&current->mm->mmap_sem);
+       rc = get_user_pages(current, current->mm, (unsigned long)data,
+                           num_pages, 0, 0, pages, NULL);
+       up_read(&current->mm->mmap_sem);
+       if (rc < 0)
+               goto fail;
+       return pages;
+
+fail:
+       kfree(pages);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(ceph_get_direct_page_vector);
+
+void ceph_put_page_vector(struct page **pages, int num_pages)
+{
+       int i;
+
+       for (i = 0; i < num_pages; i++)
+               put_page(pages[i]);
+       kfree(pages);
+}
+EXPORT_SYMBOL(ceph_put_page_vector);
+
+void ceph_release_page_vector(struct page **pages, int num_pages)
+{
+       int i;
+
+       for (i = 0; i < num_pages; i++)
+               __free_pages(pages[i], 0);
+       kfree(pages);
+}
+EXPORT_SYMBOL(ceph_release_page_vector);
+
+/*
+ * allocate a vector of new pages
+ */
+struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
+{
+       struct page **pages;
+       int i;
+
+       pages = kmalloc(sizeof(*pages) * num_pages, flags);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+       for (i = 0; i < num_pages; i++) {
+               pages[i] = __page_cache_alloc(flags);
+               if (pages[i] == NULL) {
+                       ceph_release_page_vector(pages, i);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+       return pages;
+}
+EXPORT_SYMBOL(ceph_alloc_page_vector);
+
+/*
+ * copy user data into a page vector
+ */
+int ceph_copy_user_to_page_vector(struct page **pages,
+                                        const char __user *data,
+                                        loff_t off, size_t len)
+{
+       int i = 0;
+       int po = off & ~PAGE_CACHE_MASK;
+       int left = len;
+       int l, bad;
+
+       while (left > 0) {
+               l = min_t(int, PAGE_CACHE_SIZE-po, left);
+               bad = copy_from_user(page_address(pages[i]) + po, data, l);
+               if (bad == l)
+                       return -EFAULT;
+               data += l - bad;
+               left -= l - bad;
+               po += l - bad;
+               if (po == PAGE_CACHE_SIZE) {
+                       po = 0;
+                       i++;
+               }
+       }
+       return len;
+}
+EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
+
+int ceph_copy_to_page_vector(struct page **pages,
+                                   const char *data,
+                                   loff_t off, size_t len)
+{
+       int i = 0;
+       size_t po = off & ~PAGE_CACHE_MASK;
+       size_t left = len;
+       size_t l;
+
+       while (left > 0) {
+               l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               memcpy(page_address(pages[i]) + po, data, l);
+               data += l;
+               left -= l;
+               po += l;
+               if (po == PAGE_CACHE_SIZE) {
+                       po = 0;
+                       i++;
+               }
+       }
+       return len;
+}
+EXPORT_SYMBOL(ceph_copy_to_page_vector);
+
+int ceph_copy_from_page_vector(struct page **pages,
+                                   char *data,
+                                   loff_t off, size_t len)
+{
+       int i = 0;
+       size_t po = off & ~PAGE_CACHE_MASK;
+       size_t left = len;
+       size_t l;
+
+       while (left > 0) {
+               l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               memcpy(data, page_address(pages[i]) + po, l);
+               data += l;
+               left -= l;
+               po += l;
+               if (po == PAGE_CACHE_SIZE) {
+                       po = 0;
+                       i++;
+               }
+       }
+       return len;
+}
+EXPORT_SYMBOL(ceph_copy_from_page_vector);
+
+/*
+ * copy data from a page vector into a user pointer
+ */
+int ceph_copy_page_vector_to_user(struct page **pages,
+                                        char __user *data,
+                                        loff_t off, size_t len)
+{
+       int i = 0;
+       int po = off & ~PAGE_CACHE_MASK;
+       int left = len;
+       int l, bad;
+
+       while (left > 0) {
+               l = min_t(int, left, PAGE_CACHE_SIZE-po);
+               bad = copy_to_user(data, page_address(pages[i]) + po, l);
+               if (bad == l)
+                       return -EFAULT;
+               data += l - bad;
+               left -= l - bad;
+               if (po) {
+                       po += l - bad;
+                       if (po == PAGE_CACHE_SIZE)
+                               po = 0;
+               }
+               i++;
+       }
+       return len;
+}
+EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
+
+/*
+ * Zero an extent within a page vector.  Offset is relative to the
+ * start of the first page.
+ */
+void ceph_zero_page_vector_range(int off, int len, struct page **pages)
+{
+       int i = off >> PAGE_CACHE_SHIFT;
+
+       off &= ~PAGE_CACHE_MASK;
+
+       dout("zero_page_vector_page %u~%u\n", off, len);
+
+       /* leading partial page? */
+       if (off) {
+               int end = min((int)PAGE_CACHE_SIZE, off + len);
+               dout("zeroing %d %p head from %d\n", i, pages[i],
+                    (int)off);
+               zero_user_segment(pages[i], off, end);
+               len -= (end - off);
+               i++;
+       }
+       while (len >= PAGE_CACHE_SIZE) {
+               dout("zeroing %d %p len=%d\n", i, pages[i], len);
+               zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+               len -= PAGE_CACHE_SIZE;
+               i++;
+       }
+       /* trailing partial page? */
+       if (len) {
+               dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
+               zero_user_segment(pages[i], 0, len);
+       }
+}
+EXPORT_SYMBOL(ceph_zero_page_vector_range);
+
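
All of the copy and zero helpers above share one traversal: split (off, len) into per-page spans, where the first span starts at off & ~PAGE_CACHE_MASK and every later one starts at offset 0. A small runnable sketch of that arithmetic (PAGE_SZ stands in for PAGE_CACHE_SIZE):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ 4096UL

static void walk(size_t off, size_t len)
{
    size_t i = off / PAGE_SZ;          /* off >> PAGE_CACHE_SHIFT */
    size_t po = off % PAGE_SZ;         /* off & ~PAGE_CACHE_MASK  */

    while (len > 0) {
        size_t l = PAGE_SZ - po < len ? PAGE_SZ - po : len;

        printf("page %zu: offset %zu, %zu bytes\n", i, po, l);
        len -= l;
        po = 0;                        /* later pages start at offset 0 */
        i++;
    }
}

int main(void)
{
    walk(4000, 5000);                  /* crosses two page boundaries */
    return 0;
}

This prints a 96-byte head span, one full page, and an 808-byte tail, the same head/full/tail structure ceph_zero_page_vector_range() handles case by case.
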
index 251997a9548362c5ebc8a0a34842971a0198539a..282806ba7a57e60991f2f7806bc3015d9b8596a5 100644 (file)
@@ -243,6 +243,7 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
        unlock_sock_fast(sk, slow);
 
        /* skb is now orphaned, can be freed outside of locked section */
+       trace_kfree_skb(skb, skb_free_datagram_locked);
        __kfree_skb(skb);
 }
 EXPORT_SYMBOL(skb_free_datagram_locked);
index 3721fbb9a83c3c7761c05ae39d8acab21b6f6b66..7ec85e27beed840b8da5f9dae620bcd48453f3f7 100644 (file)
 #include <linux/jhash.h>
 #include <linux/random.h>
 #include <trace/events/napi.h>
+#include <trace/events/net.h>
+#include <trace/events/skb.h>
 #include <linux/pci.h>
 
 #include "net-sysfs.h"
@@ -1978,6 +1980,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                }
 
                rc = ops->ndo_start_xmit(skb, dev);
+               trace_net_dev_xmit(skb, rc);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
@@ -1998,6 +2001,7 @@ gso:
                        skb_dst_drop(nskb);
 
                rc = ops->ndo_start_xmit(nskb, dev);
+               trace_net_dev_xmit(nskb, rc);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
                                goto out_kfree_gso_skb;
@@ -2058,16 +2062,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
        int queue_index;
-       struct sock *sk = skb->sk;
+       const struct net_device_ops *ops = dev->netdev_ops;
 
-       queue_index = sk_tx_queue_get(sk);
-       if (queue_index < 0) {
-               const struct net_device_ops *ops = dev->netdev_ops;
+       if (ops->ndo_select_queue) {
+               queue_index = ops->ndo_select_queue(dev, skb);
+               queue_index = dev_cap_txqueue(dev, queue_index);
+       } else {
+               struct sock *sk = skb->sk;
+               queue_index = sk_tx_queue_get(sk);
+               if (queue_index < 0) {
 
-               if (ops->ndo_select_queue) {
-                       queue_index = ops->ndo_select_queue(dev, skb);
-                       queue_index = dev_cap_txqueue(dev, queue_index);
-               } else {
                        queue_index = 0;
                        if (dev->real_num_tx_queues > 1)
                                queue_index = skb_tx_hash(dev, skb);
@@ -2186,6 +2190,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
 #endif
+       trace_net_dev_queue(skb);
        if (q->enqueue) {
                rc = __dev_xmit_skb(skb, q, dev, txq);
                goto out;
@@ -2512,6 +2517,7 @@ int netif_rx(struct sk_buff *skb)
        if (netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
+       trace_netif_rx(skb);
 #ifdef CONFIG_RPS
        {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -2571,6 +2577,7 @@ static void net_tx_action(struct softirq_action *h)
                        clist = clist->next;
 
                        WARN_ON(atomic_read(&skb->users));
+                       trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
@@ -2828,6 +2835,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
+       trace_netif_receive_skb(skb);
        if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
                return NET_RX_SUCCESS;
 
@@ -4845,7 +4853,7 @@ static void rollback_registered_many(struct list_head *head)
        dev = list_first_entry(head, struct net_device, unreg_list);
        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
-       synchronize_net();
+       rcu_barrier();
 
        list_for_each_entry(dev, head, unreg_list)
                dev_put(dev);
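
The dev_pick_tx() rewrite above inverts the old priority: a driver that implements ndo_select_queue now always picks the tx queue, and the index cached on the socket is consulted only for drivers without the hook. A userspace sketch of the resulting selection order (the types are simplified stand-ins, and the modulo here approximates dev_cap_txqueue(), which in the kernel warns and clamps rather than wrapping):

#include <stdio.h>

struct dev {
    int (*select_queue)(int hash);     /* stand-in for ndo_select_queue */
    int real_num_tx_queues;
};

static int pick_tx(struct dev *d, int cached, int hash)
{
    if (d->select_queue)               /* driver hook wins outright */
        return d->select_queue(hash) % d->real_num_tx_queues;
    if (cached >= 0)                   /* sk_tx_queue_get() hit */
        return cached;
    return d->real_num_tx_queues > 1 ? hash % d->real_num_tx_queues : 0;
}

static int by_hash(int hash) { return hash; }

int main(void)
{
    struct dev plain = { NULL, 4 }, smart = { by_hash, 4 };

    /* prints "2 3": the driver hook overrides the cached queue */
    printf("%d %d\n", pick_tx(&plain, 2, 7), pick_tx(&smart, 2, 7));
    return 0;
}
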
index 7a85367b3c2f8010af24bbd6b6f4249698f9d78d..8451ab481095fc523c47fa01ccb11392b15dbc30 100644 (file)
@@ -348,7 +348,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                if (info.rule_cnt > 0) {
                        if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-                               rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+                               rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
                                                   GFP_USER);
                        if (!rule_buf)
                                return -ENOMEM;
@@ -397,7 +397,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
            (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
                return -ENOMEM;
        full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kmalloc(full_size, GFP_USER);
+       indir = kzalloc(full_size, GFP_USER);
        if (!indir)
                return -ENOMEM;
 
@@ -538,7 +538,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
 
        gstrings.len = ret;
 
-       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -775,7 +775,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = kmalloc(reglen, GFP_USER);
+       regbuf = kzalloc(reglen, GFP_USER);
        if (!regbuf)
                return -ENOMEM;
 
index 1cd98df412dfd52daee9cc9105ceddfea0d87a59..e6b133b77ccb5615d65bcdac0ecc01b759808c76 100644 (file)
  *     in any case.
  */
 
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-       int size, err, ct;
+       int size, ct;
+       long err;
 
        if (m->msg_namelen) {
                if (mode == VERIFY_READ) {
index afa6380ed88ac2ee0b5cd8c8a731dcb9f3dcb809..7f1bb2aba03bf0e501ee1625ba6f07163b528ab9 100644 (file)
@@ -26,6 +26,7 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/skb.h>
+#include <trace/events/net.h>
 #include <trace/events/napi.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
index 26396ff67cf9b2f374e2e593128e88a861934a3e..56ba3c4e4761c6584375a8acf45022bc5c40d6e8 100644 (file)
@@ -466,6 +466,7 @@ void consume_skb(struct sk_buff *skb)
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
+       trace_consume_skb(skb);
        __kfree_skb(skb);
 }
 EXPORT_SYMBOL(consume_skb);
@@ -2706,7 +2707,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        } else if (skb_gro_len(p) != pinfo->gso_size)
                return -E2BIG;
 
-       headroom = NET_SKB_PAD + NET_IP_ALIGN;
+       headroom = skb_headroom(p);
        nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
        if (unlikely(!nskb))
                return -ENOMEM;
index b05b9b6ddb8700989e63e8597f6946ffd205bdba..7d99e13148e6287f1cca0f5e3417ee5d535c366c 100644 (file)
@@ -1078,8 +1078,11 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 #ifdef CONFIG_CGROUPS
 void sock_update_classid(struct sock *sk)
 {
-       u32 classid = task_cls_classid(current);
+       u32 classid;
 
+       rcu_read_lock();  /* doing current task, which cannot vanish. */
+       classid = task_cls_classid(current);
+       rcu_read_unlock();
        if (classid && classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
@@ -1351,9 +1354,9 @@ int sock_i_uid(struct sock *sk)
 {
        int uid;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return uid;
 }
 EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1365,9 @@ unsigned long sock_i_ino(struct sock *sk)
 {
        unsigned long ino;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return ino;
 }
 EXPORT_SYMBOL(sock_i_ino);
index d959e0f41528ce71d69f4aafea0f6d028d72607a..f5df85dcd20bc7f790aec8f58967f55e02586444 100644 (file)
@@ -141,10 +141,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
-               sk_wait_event(sk, &current_timeo, !sk->sk_err &&
-                                                 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
-                                                 sk_stream_memory_free(sk) &&
-                                                 vm_wait);
+               sk_wait_event(sk, &current_timeo, sk->sk_err ||
+                                                 (sk->sk_shutdown & SEND_SHUTDOWN) ||
+                                                 (sk_stream_memory_free(sk) &&
+                                                 !vm_wait));
                sk->sk_write_pending--;
 
                if (vm_wait) {
index 571f8950ed06f585f4dca482037d4b7985c256af..7cd7760144f7dd1998276f4e2976e4a4a1c8135d 100644 (file)
@@ -217,6 +217,7 @@ config NET_IPIP
 
 config NET_IPGRE
        tristate "IP: GRE tunnels over IP"
+       depends on IPV6 || IPV6=n
        help
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
@@ -412,7 +413,7 @@ config INET_XFRM_MODE_BEET
          If unsure, say Y.
 
 config INET_LRO
-       bool "Large Receive Offload (ipv4/tcp)"
+       tristate "Large Receive Offload (ipv4/tcp)"
        default y
        ---help---
          Support for Large Receive Offload (ipv4/tcp).
index f0550941df7b9e1ac63468384eba7ede1bf6d537..721a8a37b45c77ce1d3ae78afffe57cf9eaeedf1 100644 (file)
@@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
        if (!inet->inet_saddr)
                inet->inet_saddr = rt->rt_src;  /* Update source address */
-       if (!inet->inet_rcv_saddr)
+       if (!inet->inet_rcv_saddr) {
                inet->inet_rcv_saddr = rt->rt_src;
+               if (sk->sk_prot->rehash)
+                       sk->sk_prot->rehash(sk);
+       }
        inet->inet_daddr = rt->rt_dst;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
index a43968918350244a057e6f3364727d6a2aa7baf2..7d02a9f999fabcebeb61800816d722e6f6c054ff 100644 (file)
@@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 
        struct fib_result res;
        int no_addr, rpf, accept_local;
+       bool dev_match;
        int ret;
        struct net *net;
 
@@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
        }
        *spec_dst = FIB_RES_PREFSRC(res);
        fib_combine_itag(itag, &res);
+       dev_match = false;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+       for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+               struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+               if (nh->nh_dev == dev) {
+                       dev_match = true;
+                       break;
+               }
+       }
 #else
        if (FIB_RES_DEV(res) == dev)
+               dev_match = true;
 #endif
-       {
+       if (dev_match) {
                ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
                fib_res_put(&res);
                return ret;
index 79d057a939ba6404d4e7553008a2613815a9b049..4a8e370862bca453cd6e939162ded09de6bdf831 100644 (file)
@@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 {
        struct tnode *ret = node_parent(node);
 
-       return rcu_dereference(ret);
+       return rcu_dereference_check(ret,
+                                    rcu_read_lock_held() ||
+                                    lockdep_rtnl_is_held());
 }
 
 /* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
 
 static struct leaf *trie_firstleaf(struct trie *t)
 {
-       struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+       struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
+                                                       rcu_read_lock_held() ||
+                                                       lockdep_rtnl_is_held());
 
        if (!n)
                return NULL;
index a1ad0e7180d2bd7051df4f78bed75fbb7612da40..2a4bb76f2132957da25326ce98653b249d9ccaaf 100644 (file)
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                igmpv3_clear_delrec(in_dev);
        } else if (len < 12) {
                return; /* ignore bogus packet; freed by caller */
+       } else if (IGMP_V1_SEEN(in_dev)) {
+               /* This is a v3 query with v1 queriers present */
+               max_delay = IGMP_Query_Response_Interval;
+               group = 0;
+       } else if (IGMP_V2_SEEN(in_dev)) {
+               /* This is a v3 query with v2 queriers present;
+                * interpretation of the max_delay code is problematic here.
+                * A real v2 host would use ih_code directly, while v3 has a
+                * different encoding. We use the v3 encoding as more likely
+                * to be intended in a v3 query.
+                */
+               max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
        } else { /* v3 */
                if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
                        return;
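
The new IGMP_V2_SEEN branch decodes the v3 Max Resp Code even with a v2 querier present; per the comment, that is the more plausible reading of a v3 query. The encoding is linear below 128 and a 4-bit-mantissa/3-bit-exponent float above, in units of 1/10 s (the kernel then scales by HZ/IGMP_TIMER_SCALE to get jiffies). A userspace decode following the IGMPV3_MRC()/IGMPV3_EXP() definition in include/linux/igmp.h:

#include <stdio.h>

static unsigned mrc(unsigned v)
{
    if (v < 128)
        return v;                      /* linear range */
    /* mant:4 exp:3 -> (mant | 0x10) << (exp + 3) */
    return ((v & 0x0f) | 0x10) << (((v >> 4) & 0x07) + 3);
}

int main(void)
{
    unsigned codes[] = { 100, 128, 0xff };

    for (unsigned i = 0; i < 3; i++)
        printf("code %3u -> %u tenths of a second\n", codes[i], mrc(codes[i]));
    return 0;
}

Note the encoding is continuous at the boundary (code 128 decodes to 128), and the maximum code 0xff decodes to 31744 tenths, a little under 53 minutes.
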
index 945b20a5ad5006b6a8ebf84b22756f7573436c1f..35c93e8b6a4694561c838641e546b609e8cbfaa0 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        struct in6_addr *addr6;
                        int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
                }
        }
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
index 04b69896df5fc743021efd4d4a1705b7de146333..7649d7750075d184896a9da6d37ed9a35ea5f403 100644 (file)
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
         * we can switch to copy when see the first bad fragment.
         */
        if (skb_has_frags(skb)) {
-               struct sk_buff *frag;
+               struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
-                       truesizes += frag->truesize;
+                       skb->truesize -= frag->truesize;
                }
 
                /* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
+
+slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
 
 slow_path:
index 6c40a8c46e7984843275af12bfbac9a241e8e4c6..64b70ad162e370dbeb5b60adda5dd811182fc0f5 100644 (file)
@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_HDRINCL:
                val = inet->hdrincl;
                break;
+       case IP_NODEFRAG:
+               val = inet->nodefrag;
+               break;
        case IP_MTU_DISCOVER:
                val = inet->pmtudisc;
                break;
index b254dafaf4294548b7d36d9b54ce29832f8658e3..43eec80c0e7c55a2f6dc20790a30603236b7d93f 100644 (file)
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+       nskb->protocol = htons(ETH_P_IP);
        if (ip_route_me_harder(nskb, addr_type))
                goto free_nskb;
 
index 244f7cb08d681d35f9a08744ca449ce90f5b8972..37f8adb68c79e8619d1a45a496ca6e1ef37c7c8a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/percpu.h>
+#include <linux/security.h>
 #include <net/net_namespace.h>
 
 #include <linux/netfilter.h>
@@ -87,6 +88,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
        rcu_read_unlock();
 }
 
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+       int ret;
+       u32 len;
+       char *secctx;
+
+       ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+       if (ret)
+               return ret;
+
+       ret = seq_printf(s, "secctx=%s ", secctx);
+
+       security_release_secctx(secctx, len);
+       return ret;
+}
+#else
+static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+       return 0;
+}
+#endif
+
 static int ct_seq_show(struct seq_file *s, void *v)
 {
        struct nf_conntrack_tuple_hash *hash = v;
@@ -148,10 +172,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
                goto release;
 #endif
 
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
-       if (seq_printf(s, "secmark=%u ", ct->secmark))
+       if (ct_show_secctx(s, ct))
                goto release;
-#endif
 
        if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
                goto release;
index eab8de32f200af74828bbf0fb3187ff578451e9b..f3a9b42b16c620a7f569e00039beea13bc75bc3c 100644 (file)
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
                                          const struct net_device *out,
                                          int (*okfn)(struct sk_buff *))
 {
+       struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(skb->sk);
 
-       if (inet && inet->nodefrag)
+       if (sk && (sk->sk_family == PF_INET) &&
+           inet->nodefrag)
                return NF_ACCEPT;
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
index 8c8632d9b93cead0cd115945a9566d1e57829667..957c9241fb0ce1d91cde6c1b098f81b52337690a 100644 (file)
@@ -38,7 +38,7 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
 #define MAX_IP_NAT_PROTO 256
-static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
+static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                __read_mostly;
 
 static inline const struct nf_nat_protocol *
index 1679e2c0963d9b83e34f1f8d6a34b7702a603e2c..ee5f419d0a56d01c6533d6ee45f70a7405480150 100644 (file)
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
        unsigned char s[4];
 
        if (offset & 1) {
-               s[0] = s[2] = 0;
+               s[0] = ~0;
                s[1] = ~*optr;
+               s[2] = 0;
                s[3] = *nptr;
        } else {
-               s[1] = s[3] = 0;
                s[0] = ~*optr;
+               s[1] = ~0;
                s[2] = *nptr;
+               s[3] = 0;
        }
 
        *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
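
The fast_csum() fix is subtle: the scratch buffer feeds csum_partial() one 16-bit word holding the complemented old byte and one holding the new byte, and the byte paired with the old value must be ~0, not 0, so the unmodified neighbour byte contributes ~x + x = 0xffff and cancels in one's-complement arithmetic. A self-contained userspace model of the corrected update, where a plain 16-bit one's-complement sum stands in for csum_partial()/csum_fold(); the full recomputation and the incremental update should agree:

#include <stdio.h>
#include <stdint.h>

/* 16-bit one's-complement sum (Internet checksum without the final ~) */
static uint16_t csum16(const uint8_t *p, size_t n, uint32_t sum)
{
    for (size_t i = 0; i + 1 < n; i += 2)
        sum += (uint32_t)(p[i] << 8 | p[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);    /* fold carries */
    return (uint16_t)sum;
}

/* incremental update in the style of the fixed fast_csum(): replace the
 * byte at 'off' with 'nb', adjusting the stored checksum (~sum form) */
static uint16_t fast_update(uint16_t csum, uint8_t ob, uint8_t nb, int off)
{
    uint8_t s[4];

    if (off & 1) {
        s[0] = 0xff; s[1] = (uint8_t)~ob;      /* ~old word: pair -> ~0 */
        s[2] = 0x00; s[3] = nb;                /* new word: pair -> 0   */
    } else {
        s[0] = (uint8_t)~ob; s[1] = 0xff;
        s[2] = nb;           s[3] = 0x00;
    }
    return (uint16_t)~csum16(s, 4, (uint16_t)~csum);
}

int main(void)
{
    uint8_t pkt[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint16_t c = (uint16_t)~csum16(pkt, 4, 0);
    uint16_t fast, full;

    pkt[2] = 0xab;                             /* mutate one byte */
    fast = fast_update(c, 0x56, 0xab, 2);
    full = (uint16_t)~csum16(pkt, 4, 0);
    printf("full=%04x fast=%04x\n", full, fast);   /* values match */
    return 0;
}

With the old zero padding, the word containing ~old would be short by the neighbour byte's contribution, so the delta came out wrong by 0xff00 whenever the carries did not happen to cancel.
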
index 3f56b6e6c6aab583d65902e7190bbf1a6eaa60b6..ac6559cb54f9f650986e4dd86996349e328cd92a 100644 (file)
@@ -1231,7 +1231,7 @@ restart:
                        }
 
                        if (net_ratelimit())
-                               printk(KERN_WARNING "Neighbour table overflow.\n");
+                               printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
                        return -ENOBUFS;
                }
@@ -2738,6 +2738,11 @@ slow_output:
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+       return NULL;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .family                 =       AF_INET,
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
-       .check                  =       ipv4_dst_check,
+       .check                  =       ipv4_blackhole_dst_check,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .entries                =       ATOMIC_INIT(0),
 };
index 3fb1428e526eedb521057a49624fa28dde8b41cd..f115ea68a4efa264c59b20f61222db97a6050a9d 100644 (file)
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         */
 
        mask = 0;
-       if (sk->sk_err)
-               mask = POLLERR;
 
        /*
         * POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
+       /* This barrier is coupled with smp_wmb() in tcp_reset() */
+       smp_rmb();
+       if (sk->sk_err)
+               mask |= POLLERR;
+
        return mask;
 }
 EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        sg = sk->sk_route_caps & NETIF_F_SG;
 
        while (--iovlen >= 0) {
-               int seglen = iov->iov_len;
+               size_t seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
 
                iov++;
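
The tcp_poll()/tcp_reset() hunks pair an smp_wmb() after the sk_err store with an smp_rmb() before the sk_err load, so a poller that observes the wakeup state cannot read a stale error. The same publish/consume shape in portable C11, offered only as a userspace analogue (the fences stand in for the kernel barriers; build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int sk_err;                 /* plain field, as in struct sock */
static atomic_int event;           /* stand-in for the poll wakeup state */

static void *resetter(void *arg)
{
    sk_err = 104;                                   /* ECONNRESET */
    atomic_thread_fence(memory_order_release);      /* smp_wmb() */
    atomic_store_explicit(&event, 1, memory_order_relaxed);
    return arg;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, resetter, NULL);
    while (!atomic_load_explicit(&event, memory_order_relaxed))
        ;                                           /* poll loop */
    atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
    printf("sk_err=%d\n", sk_err);                  /* guaranteed 104 */
    pthread_join(t, NULL);
    return 0;
}

Without the fence pair, the error store could be reordered past the event store (or the error load hoisted above the event load), which is exactly the window the patch closes by moving the sk_err check after the barrier in tcp_poll().
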
index e663b78a2ef6b6286b549aa65b418fe385f32184..b55f60f6fcbe934c1364ee3aece309dff4d1be4b 100644 (file)
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                        cnt += tcp_skb_pcount(skb);
 
                if (cnt > packets) {
-                       if (tcp_is_sack(tp) || (oldcnt >= packets))
+                       if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (oldcnt >= packets))
                                break;
 
                        mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk)
        default:
                sk->sk_err = ECONNRESET;
        }
+       /* This barrier is coupled with smp_rmb() in tcp_poll() */
+       smp_wmb();
 
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_error_report(sk);
index c35b469e851c298814d69583bd593ec1c580dde5..74c54b30600f618522e07581c43130eea031f2db 100644 (file)
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
-                                 unsigned int boundary)
+                                 unsigned int boundary,
+                                 bool syn_set)
 {
        unsigned int timeout, linear_backoff_thresh;
        unsigned int start_ts;
+       unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
        if (!inet_csk(sk)->icsk_retransmits)
                return false;
@@ -151,12 +154,12 @@ static bool retransmits_timed_out(struct sock *sk,
        else
                start_ts = tcp_sk(sk)->retrans_stamp;
 
-       linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+       linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
        if (boundary <= linear_backoff_thresh)
-               timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+               timeout = ((2 << boundary) - 1) * rto_base;
        else
-               timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+               timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                          (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 
        return (tcp_time_stamp - start_ts) >= timeout;
@@ -167,14 +170,15 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
-       bool do_reset;
+       bool do_reset, syn_set = 0;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               syn_set = 1;
        } else {
-               if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+               if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
@@ -187,14 +191,14 @@ static int tcp_write_timeout(struct sock *sk)
 
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
-                                  !retransmits_timed_out(sk, retry_until);
+                                  !retransmits_timed_out(sk, retry_until, 0);
 
                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }
 
-       if (retransmits_timed_out(sk, retry_until)) {
+       if (retransmits_timed_out(sk, retry_until, syn_set)) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
@@ -436,7 +440,7 @@ out_reset_timer:
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
                __sk_dst_reset(sk);
 
 out:;
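
retransmits_timed_out() converts a retry count into a wall-clock budget: exponential doubling from rto_base up to TCP_RTO_MAX, then linear. With syn_set the base becomes TCP_TIMEOUT_INIT, which is the point of the patch: SYN retransmissions start from a 3 s RTO rather than 200 ms, so the old formula cut SYN attempts off far too early. A runnable recomputation, assuming the 2.6.x-era constants shown in the defines (HZ in particular is config-dependent):

#include <stdio.h>

#define HZ                1000
#define TCP_RTO_MIN       (HZ / 5)
#define TCP_RTO_MAX       (120 * HZ)
#define TCP_TIMEOUT_INIT  (3 * HZ)

static unsigned ilog2_u(unsigned v)        /* floor(log2(v)), v > 0 */
{
    unsigned r = 0;

    while (v >>= 1)
        r++;
    return r;
}

static unsigned timeout_jiffies(unsigned boundary, int syn_set)
{
    unsigned rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
    unsigned thresh = ilog2_u(TCP_RTO_MAX / rto_base);

    if (boundary <= thresh)
        return ((2u << boundary) - 1) * rto_base;
    return ((2u << thresh) - 1) * rto_base +
           (boundary - thresh) * TCP_RTO_MAX;
}

int main(void)
{
    /* sysctl_tcp_retries2 defaults to 15, tcp_syn_retries to 5 */
    printf("data: %u s\n", timeout_jiffies(15, 0) / HZ);   /* 924 s */
    printf("syn:  %u s\n", timeout_jiffies(5, 1) / HZ);    /* 189 s */
    return 0;
}

The 189 s figure is just 3+6+12+24+48+96 seconds of doubling SYN timeouts, matching what the stack actually does on the wire.
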
index 32e0bef60d0afdbedd3d4cbdd3c57a9f9beafcdc..fb23c2e63b5281a7ca505af3ac94f411e10c4ee6 100644 (file)
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL(udp_lib_unhash);
 
+/*
+ * inet_rcv_saddr was changed; we must rehash the secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+       if (sk_hashed(sk)) {
+               struct udp_table *udptable = sk->sk_prot->h.udp_table;
+               struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+               hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+               nhslot2 = udp_hashslot2(udptable, newhash);
+               udp_sk(sk)->udp_portaddr_hash = newhash;
+               if (hslot2 != nhslot2) {
+                       hslot = udp_hashslot(udptable, sock_net(sk),
+                                            udp_sk(sk)->udp_port_hash);
+                       /* we must lock primary chain too */
+                       spin_lock_bh(&hslot->lock);
+
+                       spin_lock(&hslot2->lock);
+                       hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+                       hslot2->count--;
+                       spin_unlock(&hslot2->lock);
+
+                       spin_lock(&nhslot2->lock);
+                       hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &nhslot2->head);
+                       nhslot2->count++;
+                       spin_unlock(&nhslot2->lock);
+
+                       spin_unlock_bh(&hslot->lock);
+               }
+       }
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+       u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+                                         inet_sk(sk)->inet_rcv_saddr,
+                                         inet_sk(sk)->inet_num);
+       udp_lib_rehash(sk, new_hash);
+}
+
 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
@@ -1843,6 +1886,7 @@ struct proto udp_prot = {
        .backlog_rcv       = __udp_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
+       .rehash            = udp_v4_rehash,
        .get_port          = udp_v4_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
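
The new rehash hook exists because UDP's secondary hash keys on (receive address, port): once connect() pins inet_rcv_saddr (see the ipv4 and ipv6 datagram hunks elsewhere in this commit), the socket's slot changes and it must be moved between chains under both slot locks. A toy illustration of why the slot moves (the mixer below is an arbitrary stand-in for udp4_portaddr_hash(), which really uses jhash):

#include <stdio.h>
#include <stdint.h>

#define SLOTS 64

static unsigned slot(uint32_t addr, uint16_t port)
{
    uint32_t h = addr * 2654435761u ^ port;    /* toy mixer, not jhash */
    return h % SLOTS;
}

int main(void)
{
    uint16_t port = 5353;
    uint32_t any = 0;                  /* INADDR_ANY before connect() */
    uint32_t saddr = 0xc0a80001;       /* 192.168.0.1 after connect() */

    printf("slot before: %u\n", slot(any, port));
    printf("slot after:  %u\n", slot(saddr, port));
    /* when these differ, lookups keyed on the new address would miss
     * the socket unless it is unhashed from the old chain and added
     * to the new one -- which is what udp_lib_rehash() does */
    return 0;
}
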
index 869078d4eeb957a4982ab62e5d408e8fbf42117c..a580349f0b8ab53c77b04b00d504de0bf1f83e09 100644 (file)
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
 
 static int xfrm4_get_tos(struct flowi *fl)
 {
-       return fl->fl4_tos;
+       return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
 }
 
 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
index 1ef1366a0a03775eed25ab5ef0b083aed593726e..47947624eccc58fe7fcfcdbff19fb655a07ddedc 100644 (file)
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
 }
 
 static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                    struct xfrm_tmpl *tmpl,
-                    xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+{
+       sel->daddr.a4 = fl->fl4_dst;
+       sel->saddr.a4 = fl->fl4_src;
+       sel->dport = xfrm_flowi_dport(fl);
+       sel->dport_mask = htons(0xffff);
+       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport_mask = htons(0xffff);
+       sel->family = AF_INET;
+       sel->prefixlen_d = 32;
+       sel->prefixlen_s = 32;
+       sel->proto = fl->proto;
+       sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+                  xfrm_address_t *daddr, xfrm_address_t *saddr)
 {
-       x->sel.daddr.a4 = fl->fl4_dst;
-       x->sel.saddr.a4 = fl->fl4_src;
-       x->sel.dport = xfrm_flowi_dport(fl);
-       x->sel.dport_mask = htons(0xffff);
-       x->sel.sport = xfrm_flowi_sport(fl);
-       x->sel.sport_mask = htons(0xffff);
-       x->sel.family = AF_INET;
-       x->sel.prefixlen_d = 32;
-       x->sel.prefixlen_s = 32;
-       x->sel.proto = fl->proto;
-       x->sel.ifindex = fl->oif;
        x->id = tmpl->id;
        if (x->id.daddr.a4 == 0)
                x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
        .owner                  = THIS_MODULE,
        .init_flags             = xfrm4_init_flags,
        .init_tempsel           = __xfrm4_init_tempsel,
+       .init_temprop           = xfrm4_init_temprop,
        .output                 = xfrm4_output,
        .extract_input          = xfrm4_extract_input,
        .extract_output         = xfrm4_extract_output,
index ab70a3fbcafafee9a080ea0f8c6eab2ce04fc37e..324fac3b6c16db0238d1139649038bcaa9aaced2 100644 (file)
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void)
        if (err < 0) {
                printk(KERN_CRIT "IPv6 Addrconf:"
                       " cannot initialize default policy table: %d.\n", err);
-               return err;
+               goto out;
        }
 
-       register_pernet_subsys(&addrconf_ops);
+       err = register_pernet_subsys(&addrconf_ops);
+       if (err < 0)
+               goto out_addrlabel;
 
        /* The addrconf netdev notifier requires that loopback_dev
         * has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@ errout:
        unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
        unregister_pernet_subsys(&addrconf_ops);
-
+out_addrlabel:
+       ipv6_addr_label_cleanup();
+out:
        return err;
 }
 
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void)
 
        unregister_netdevice_notifier(&ipv6_dev_notf);
        unregister_pernet_subsys(&addrconf_ops);
+       ipv6_addr_label_cleanup();
 
        rtnl_lock();
 
index f0e774cea386696a72560d9306f441a46817a96d..8175f802651bec4481080823a73f60a21d3a2f36 100644 (file)
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
        return register_pernet_subsys(&ipv6_addr_label_ops);
 }
 
+void ipv6_addr_label_cleanup(void)
+{
+       unregister_pernet_subsys(&ipv6_addr_label_ops);
+}
+
 static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
        [IFAL_ADDRESS]          = { .len = sizeof(struct in6_addr), },
        [IFAL_LABEL]            = { .len = sizeof(u32), },
index 7d929a22cbc2f505aca94ba5a6fd9cae412824dc..ef371aa01ac50724f9dff9cbf7d084e062d844c9 100644 (file)
@@ -105,9 +105,12 @@ ipv4_connected:
                if (ipv6_addr_any(&np->saddr))
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&np->rcv_saddr)) {
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
                                               &np->rcv_saddr);
+                       if (sk->sk_prot->rehash)
+                               sk->sk_prot->rehash(sk);
+               }
 
                goto out;
        }
@@ -181,6 +184,8 @@ ipv4_connected:
        if (ipv6_addr_any(&np->rcv_saddr)) {
                ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+               if (sk->sk_prot->rehash)
+                       sk->sk_prot->rehash(sk);
        }
 
        ip6_dst_store(sk, dst,
index d40b330c0ee698af62f51f90caf86b2e6cf04c9f..980912ed7a388bd2404b8cb934b2b61e67e12a6c 100644 (file)
@@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        if (skb_has_frags(skb)) {
                int first_len = skb_pagelen(skb);
-               int truesizes = 0;
+               struct sk_buff *frag2;
 
                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
-                           goto slow_path;
+                               goto slow_path_clean;
 
                        /* Partially cloned skb? */
                        if (skb_shared(frag))
-                               goto slow_path;
+                               goto slow_path_clean;
 
                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
-                               truesizes += frag->truesize;
                        }
+                       skb->truesize -= frag->truesize;
                }
 
                err = 0;
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
-               skb->truesize -= truesizes;
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                              IPSTATS_MIB_FRAGFAILS);
                dst_release(&rt->dst);
                return err;
+
+slow_path_clean:
+               skb_walk_frags(skb, frag2) {
+                       if (frag2 == frag)
+                               break;
+                       frag2->sk = NULL;
+                       frag2->destructor = NULL;
+                       skb->truesize += frag2->truesize;
+               }
        }
 
 slow_path:
index 13ef5bc05cf5220a3e2aa62543c4a12b79e5a98a..578f3c1a16db614614f986947cbae5a7960fcdb5 100644 (file)
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb)
                kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct sk_buff *skb)
-{
-       atomic_sub(skb->truesize, &nf_init_frags.mem);
-       nf_skb_free(skb);
-       kfree_skb(skb);
-}
-
 /* Destruction primitives. */
 
 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
        }
 
 found:
-       /* We found where to put this one.  Check for overlap with
-        * preceding fragment, and, if needed, align things so that
-        * any overlaps are eliminated.
-        */
-       if (prev) {
-               int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
-
-               if (i > 0) {
-                       offset += i;
-                       if (end <= offset) {
-                               pr_debug("overlap\n");
-                               goto err;
-                       }
-                       if (!pskb_pull(skb, i)) {
-                               pr_debug("Can't pull\n");
-                               goto err;
-                       }
-                       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-                               skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
-
-       /* Look for overlap with succeeding segments.
-        * If we can merge fragments, do it.
+       /* RFC5722, Section 4:
+        *   When reassembling an IPv6 datagram, if
+        *   one or more its constituent fragments is determined to be an
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments, including those not yet received) MUST be silently
+        *   discarded.
         */
-       while (next && NFCT_FRAG6_CB(next)->offset < end) {
-               /* overlap is 'i' bytes */
-               int i = end - NFCT_FRAG6_CB(next)->offset;
-
-               if (i < next->len) {
-                       /* Eat head of the next overlapped fragment
-                        * and leave the loop. The next ones cannot overlap.
-                        */
-                       pr_debug("Eat head of the overlapped parts.: %d", i);
-                       if (!pskb_pull(next, i))
-                               goto err;
 
-                       /* next fragment */
-                       NFCT_FRAG6_CB(next)->offset += i;
-                       fq->q.meat -= i;
-                       if (next->ip_summed != CHECKSUM_UNNECESSARY)
-                               next->ip_summed = CHECKSUM_NONE;
-                       break;
-               } else {
-                       struct sk_buff *free_it = next;
-
-                       /* Old fragmnet is completely overridden with
-                        * new one drop it.
-                        */
-                       next = next->next;
+       /* Check for overlap with preceding fragment. */
+       if (prev &&
+           (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+               goto discard_fq;
 
-                       if (prev)
-                               prev->next = next;
-                       else
-                               fq->q.fragments = next;
-
-                       fq->q.meat -= free_it->len;
-                       frag_kfree_skb(free_it);
-               }
-       }
+       /* Look for overlap with succeeding segment. */
+       if (next && NFCT_FRAG6_CB(next)->offset < end)
+               goto discard_fq;
 
        NFCT_FRAG6_CB(skb)->offset = offset;
 
@@ -371,6 +319,8 @@ found:
        write_unlock(&nf_frags.lock);
        return 0;
 
+discard_fq:
+       fq_kill(fq);
 err:
        return -1;
 }
index 545c4141b755ee91277c6bd2912de4f42813bb05..64cfef1b0a4c556ccf63c2f912c86ec3a1599b82 100644 (file)
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
-{
-       atomic_sub(skb->truesize, &nf->mem);
-       kfree_skb(skb);
-}
-
 void ip6_frag_init(struct inet_frag_queue *q, void *a)
 {
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
        }
 
 found:
-       /* We found where to put this one.  Check for overlap with
-        * preceding fragment, and, if needed, align things so that
-        * any overlaps are eliminated.
+       /* RFC5722, Section 4:
+        *   When reassembling an IPv6 datagram, if
+        *   one or more its constituent fragments is determined to be an
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments, including those not yet received) MUST be silently
+        *   discarded.
         */
-       if (prev) {
-               int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
 
-               if (i > 0) {
-                       offset += i;
-                       if (end <= offset)
-                               goto err;
-                       if (!pskb_pull(skb, i))
-                               goto err;
-                       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-                               skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
+       /* Check for overlap with preceding fragment. */
+       if (prev &&
+           (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+               goto discard_fq;
 
-       /* Look for overlap with succeeding segments.
-        * If we can merge fragments, do it.
-        */
-       while (next && FRAG6_CB(next)->offset < end) {
-               int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
-
-               if (i < next->len) {
-                       /* Eat head of the next overlapped fragment
-                        * and leave the loop. The next ones cannot overlap.
-                        */
-                       if (!pskb_pull(next, i))
-                               goto err;
-                       FRAG6_CB(next)->offset += i;    /* next fragment */
-                       fq->q.meat -= i;
-                       if (next->ip_summed != CHECKSUM_UNNECESSARY)
-                               next->ip_summed = CHECKSUM_NONE;
-                       break;
-               } else {
-                       struct sk_buff *free_it = next;
-
-                       /* Old fragment is completely overridden with
-                        * new one drop it.
-                        */
-                       next = next->next;
-
-                       if (prev)
-                               prev->next = next;
-                       else
-                               fq->q.fragments = next;
-
-                       fq->q.meat -= free_it->len;
-                       frag_kfree_skb(fq->q.net, free_it);
-               }
-       }
+       /* Look for overlap with succeeding segment. */
+       if (next && FRAG6_CB(next)->offset < end)
+               goto discard_fq;
 
        FRAG6_CB(skb)->offset = offset;
 
@@ -436,6 +393,8 @@ found:
        write_unlock(&ip6_frags.lock);
        return -1;
 
+discard_fq:
+       fq_kill(fq);
 err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
index d126365ac0463bc075d62446562a8d9f07d548a2..a275c6e1e25c23884d7d1859e46a2ee82c00acef 100644 (file)
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 
                        if (net_ratelimit())
                                printk(KERN_WARNING
-                                      "Neighbour table overflow.\n");
+                                      "ipv6: Neighbour table overflow.\n");
                        dst_free(&rt->dst);
                        return NULL;
                }
@@ -1556,14 +1556,13 @@ out:
  *     i.e. Path MTU discovery
  */
 
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
-                       struct net_device *dev, u32 pmtu)
+static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+                            struct net *net, u32 pmtu, int ifindex)
 {
        struct rt6_info *rt, *nrt;
-       struct net *net = dev_net(dev);
        int allfrag = 0;
 
-       rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+       rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
        if (rt == NULL)
                return;
 
@@ -1631,6 +1630,27 @@ out:
        dst_release(&rt->dst);
 }
 
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+                       struct net_device *dev, u32 pmtu)
+{
+       struct net *net = dev_net(dev);
+
+       /*
+        * RFC 1981 states that a node "MUST reduce the size of the packets it
+        * is sending along the path" that caused the Packet Too Big message.
+        * Since it's not possible in the general case to determine which
+        * interface was used to send the original packet, we update the MTU
+        * on the interface that will be used to send future packets. We also
+        * update the MTU on the interface that received the Packet Too Big in
+        * case the original packet was forced out that interface with
+        * SO_BINDTODEVICE or similar. This is the next best thing to the
+        * correct behaviour, which would be to update the MTU on all
+        * interfaces.
+        */
+       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+}
+
 /*
  *     Misc support functions
  */
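
A userspace sketch of the two-lookup idea above, with a toy route table keyed
by (destination, ifindex) where ifindex 0 plays the role of the wildcard
route; everything here is illustrative, not the kernel's routing structures:

#include <stdio.h>
#include <string.h>

struct toy_rt { const char *dst; int ifindex; unsigned int mtu; };

static struct toy_rt table[] = {
        { "2001:db8::1", 0, 1500 },   /* route used for future sends */
        { "2001:db8::1", 2, 1500 },   /* route via the receiving device */
};

static void do_pmtu_disc(const char *dst, int ifindex, unsigned int pmtu)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!strcmp(table[i].dst, dst) &&
                    table[i].ifindex == ifindex && pmtu < table[i].mtu)
                        table[i].mtu = pmtu;
}

int main(void)
{
        /* Packet Too Big arrived on ifindex 2: update both entries. */
        do_pmtu_disc("2001:db8::1", 0, 1280);
        do_pmtu_disc("2001:db8::1", 2, 1280);
        for (size_t i = 0; i < 2; i++)
                printf("dst=%s if=%d mtu=%u\n", table[i].dst,
                       table[i].ifindex, table[i].mtu);
        return 0;
}
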
index 1dd1affdead2d418a3d98a0f34e5d027cab88371..5acb3560ff15267021266f59c7b2102f633f3d6a 100644 (file)
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
        return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
 }
 
+static void udp_v6_rehash(struct sock *sk)
+{
+       u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+                                         &inet6_sk(sk)->rcv_saddr,
+                                         inet_sk(sk)->inet_num);
+
+       udp_lib_rehash(sk, new_hash);
+}
+
 static inline int compute_score(struct sock *sk, struct net *net,
                                unsigned short hnum,
                                struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = {
        .backlog_rcv       = udpv6_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
+       .rehash            = udp_v6_rehash,
        .get_port          = udp_v6_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
index f417b77fa0e15762715a9498974fbd8f5071401d..a67575d472a320f306002a88959f0d0f9a73af09 100644 (file)
 #include <net/addrconf.h>
 
 static void
-__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                    struct xfrm_tmpl *tmpl,
-                    xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
 {
        /* Initialize temporary selector matching only
         * to current session. */
-       ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
-       ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
-       x->sel.dport = xfrm_flowi_dport(fl);
-       x->sel.dport_mask = htons(0xffff);
-       x->sel.sport = xfrm_flowi_sport(fl);
-       x->sel.sport_mask = htons(0xffff);
-       x->sel.family = AF_INET6;
-       x->sel.prefixlen_d = 128;
-       x->sel.prefixlen_s = 128;
-       x->sel.proto = fl->proto;
-       x->sel.ifindex = fl->oif;
+       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
+       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
+       sel->dport = xfrm_flowi_dport(fl);
+       sel->dport_mask = htons(0xffff);
+       sel->sport = xfrm_flowi_sport(fl);
+       sel->sport_mask = htons(0xffff);
+       sel->family = AF_INET6;
+       sel->prefixlen_d = 128;
+       sel->prefixlen_s = 128;
+       sel->proto = fl->proto;
+       sel->ifindex = fl->oif;
+}
+
+static void
+xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+                  xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
        x->id = tmpl->id;
        if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
                memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
        .eth_proto              = htons(ETH_P_IPV6),
        .owner                  = THIS_MODULE,
        .init_tempsel           = __xfrm6_init_tempsel,
+       .init_temprop           = xfrm6_init_temprop,
        .tmpl_sort              = __xfrm6_tmpl_sort,
        .state_sort             = __xfrm6_state_sort,
        .output                 = xfrm6_output,
index a788f9e9427d2c1c127d411adc8b06eb9d19ba87..6130f9d9dbe138edffaaafed0528b1d2110db0c4 100644 (file)
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
        memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
        le16_to_cpus(&val_len); n+=2;
 
-       if (val_len > 1016) {
+       if (val_len >= 1016) {
                IRDA_DEBUG(2, "%s(), parameter length too long\n", __func__ );
                return -RSP_INVALID_COMMAND_FORMAT;
        }
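
The off-by-one: elsewhere in this function (not shown in the hunk) the value
is copied into a fixed-size buffer and then NUL-terminated, so a length equal
to the buffer size already writes one byte past the end; the bound therefore
has to be >=, not >. A toy reproduction, assuming a buffer-plus-terminator
layout like the one described:

#include <stdio.h>
#include <string.h>

#define VAL_MAX 8 /* stand-in for the real 1016-byte value buffer */

static int extract(char *dst, const char *src, size_t val_len)
{
        /* "> VAL_MAX" would admit val_len == VAL_MAX, and the terminator
         * below would then land one byte past the buffer. */
        if (val_len >= VAL_MAX)
                return -1;
        memcpy(dst, src, val_len);
        dst[val_len] = '\0';
        return 0;
}

int main(void)
{
        char value[VAL_MAX];

        printf("%d\n", extract(value, "12345678", 8)); /* -1: rejected */
        printf("%d\n", extract(value, "1234567", 7));  /*  0: fits     */
        return 0;
}
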
index 023ba820236f391a01595418873b7c5e0df3ece8..582612998211d24aa8d4aea919eed1ba1e994db3 100644 (file)
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       int rc = -EINVAL, opt;
+       unsigned int opt;
+       int rc = -EINVAL;
 
        lock_sock(sk);
        if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
index e4dae0244d76b677ee89ef0b35dd51ccf7807e06..cf4aea3ba30f3c822be7a2c19a8fef87cdb8aadf 100644 (file)
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
 
 int __init llc_station_init(void)
 {
-       u16 rc = -ENOBUFS;
+       int rc = -ENOBUFS;
        struct sk_buff *skb;
        struct llc_station_state_ev *ev;
 
index c893f236acea771076b5572b42c82162c3684913..8f23401832b7729d28c9e0791a78ce8acd797728 100644 (file)
@@ -175,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
        set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
 
+       del_timer_sync(&tid_tx->addba_resp_timer);
+
        /*
         * After this packets are no longer handed right through
         * to the driver but are put onto tid_tx->pending instead,
index fa0f37e4afe4901226b0ccd668ae6a88c136eeb2..28624282c5f36ad5bed8f74c0e8bf7df42ee2d6c 100644 (file)
@@ -2199,9 +2199,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
        struct net_device *prev_dev = NULL;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-       if (status->flag & RX_FLAG_INTERNAL_CMTR)
-               goto out_free_skb;
-
        if (skb_headroom(skb) < sizeof(*rthdr) &&
            pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
                goto out_free_skb;
@@ -2260,7 +2257,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
        } else
                goto out_free_skb;
 
-       status->flag |= RX_FLAG_INTERNAL_CMTR;
        return;
 
  out_free_skb:
index 10caec5ea8fa7740d9617605adf1590eefa2a730..34da67995d94ae91c8982776fd3e9104c161079f 100644 (file)
@@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                                skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2) {
                                        skb2->dev = prev_dev;
-                                       netif_receive_skb(skb2);
+                                       netif_rx(skb2);
                                }
                        }
 
@@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
        if (prev_dev) {
                skb->dev = prev_dev;
-               netif_receive_skb(skb);
+               netif_rx(skb);
                skb = NULL;
        }
        rcu_read_unlock();
index 78b505d33bfb42cdf1033be323c2fdb1a359a833..fdaec7daff1d539038ef6ee6c0d0acfeae6e7cf6 100644 (file)
@@ -27,7 +27,7 @@
 
 static DEFINE_MUTEX(afinfo_mutex);
 
-const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
+const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
 EXPORT_SYMBOL(nf_afinfo);
 
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
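
Several files in this series grow __rcu markers. The annotation is for sparse
only and compiles away, but it makes the checker flag any access to an
RCU-protected pointer that bypasses rcu_assign_pointer()/rcu_dereference().
The discipline those helpers enforce is publish-with-release, read-with-
dependency; a userspace analogue in C11 atomics (acquire standing in for the
kernel's dependency ordering, and none of this is the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct afinfo { int family; };

/* Analogue of an __rcu pointer: never load/store it with plain accesses. */
static _Atomic(struct afinfo *) registered;

static void register_afinfo(struct afinfo *a)
{
        /* rcu_assign_pointer(): publish only the fully built object. */
        atomic_store_explicit(&registered, a, memory_order_release);
}

static struct afinfo *deref_afinfo(void)
{
        /* rcu_dereference(): pairs with the release above. */
        return atomic_load_explicit(&registered, memory_order_acquire);
}

int main(void)
{
        struct afinfo *a = malloc(sizeof(*a));

        a->family = 10; /* AF_INET6 */
        register_afinfo(a);

        struct afinfo *p = deref_afinfo();
        if (p)
                printf("family %d\n", p->family);
        free(a);
        return 0;
}
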
index 4f8ddba480110167674fa36f6854e53f673e8d68..4c2f89df5ccecc40f1dc9200772a4e8bb3e85b95 100644 (file)
@@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 
        ip_vs_out_stats(cp, skb);
        ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+       ip_vs_update_conntrack(skb, cp, 0);
        ip_vs_conn_put(cp);
 
        skb->ipvs_property = 1;
index 33b329bfc2d24e6da813a1a6a546a96ea6b3bdbe..7e9af5b76d9eb280cc7d197538e5ce6161c42e66 100644 (file)
@@ -410,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        union nf_inet_addr to;
        __be16 port;
        struct ip_vs_conn *n_cp;
-       struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -497,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                ip_vs_control_add(n_cp, cp);
        }
 
-       ct = (struct nf_conn *)skb->nfct;
-       if (ct && ct != &nf_conntrack_untracked)
-               ip_vs_expect_related(skb, ct, n_cp,
-                                    IPPROTO_TCP, &n_cp->dport, 1);
-
        /*
         *      Move tunnel to listen state
         */
index 21e1a5e9b9d3cd354d74808e44094ffc67f671da..49df6bea6a2ddaec391ce077cf9f72a72efc4a7f 100644 (file)
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 }
 #endif
 
-static void
-ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
 {
        struct nf_conn *ct = (struct nf_conn *)skb->nfct;
        struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
         * real-server we will see RIP->DIP.
         */
        new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-       new_tuple.src.u3 = cp->daddr;
+       if (outin)
+               new_tuple.src.u3 = cp->daddr;
+       else
+               new_tuple.dst.u3 = cp->vaddr;
        /*
         * This will also take care of UDP and other protocols.
         */
-       new_tuple.src.u.tcp.port = cp->dport;
+       if (outin)
+               new_tuple.src.u.tcp.port = cp->dport;
+       else
+               new_tuple.dst.u.tcp.port = cp->vport;
        nf_conntrack_alter_reply(ct, &new_tuple);
 }
 
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
-       ip_vs_update_conntrack(skb, cp);
+       ip_vs_update_conntrack(skb, cp, 1);
 
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
-       ip_vs_update_conntrack(skb, cp);
+       ip_vs_update_conntrack(skb, cp, 1);
 
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
index cdcc7649476b60e0e8584a7c2b2d31e7509a4bd4..5702de35e2bb327ea0e9bd346f57bf8ebf36c951 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
-struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
+struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
 
-struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
+struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
 EXPORT_SYMBOL_GPL(nf_expect_event_cb);
 
 /* deliver cached events and clear cache entry - must be called with locally
index 7dcf7a404190e6aa3fa06e642f54279e2f30fba9..bd82450c193f5dbb4895f4fc7565040686fa740d 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/skbuff.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 
-static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
+static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
 static DEFINE_MUTEX(nf_ct_ext_type_mutex);
 
 void __nf_ct_ext_destroy(struct nf_conn *ct)
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 {
        unsigned int off, len;
        struct nf_ct_ext_type *t;
+       size_t alloc_size;
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
        BUG_ON(t == NULL);
        off = ALIGN(sizeof(struct nf_ct_ext), t->align);
        len = off + t->len;
+       alloc_size = t->alloc_size;
        rcu_read_unlock();
 
-       *ext = kzalloc(t->alloc_size, gfp);
+       *ext = kzalloc(alloc_size, gfp);
        if (!*ext)
                return NULL;
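
The latent bug in this hunk: t->alloc_size was read after rcu_read_unlock(),
when the extension type it belongs to may already have been freed. The fix is
the general pattern of copying every needed field into locals while the
read-side lock is still held. In miniature, with a pthread rwlock standing in
for RCU:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ext_type { size_t len; size_t alloc_size; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct ext_type *type; /* may be replaced/freed under write lock */

static void *ext_create(void)
{
        size_t alloc_size;

        pthread_rwlock_rdlock(&lock);
        /* Copy everything we need into locals... */
        alloc_size = type->alloc_size;
        pthread_rwlock_unlock(&lock);
        /* ...so nothing dereferences 'type' after the unlock. */
        return calloc(1, alloc_size);
}

int main(void)
{
        struct ext_type t = { .len = 16, .alloc_size = 32 };
        void *ext;

        type = &t;
        ext = ext_create();
        printf("allocated %zu bytes at %p\n", t.alloc_size, ext);
        free(ext);
        return 0;
}
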
 
index 5bae1cd15eea93ee3f74cb51dab972c10c96d33c..146476c6441a9ea8894d78bc5a00c558c84a0874 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/types.h>
 #include <linux/timer.h>
+#include <linux/security.h>
 #include <linux/skbuff.h>
 #include <linux/errno.h>
 #include <linux/netlink.h>
@@ -245,16 +246,31 @@ nla_put_failure:
 
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
 static inline int
-ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
+ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
-       return 0;
+       struct nlattr *nest_secctx;
+       int len, ret;
+       char *secctx;
+
+       ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+       if (ret)
+               return ret;
+
+       ret = -1;
+       nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
+       if (!nest_secctx)
+               goto nla_put_failure;
+
+       NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+       nla_nest_end(skb, nest_secctx);
 
+       ret = 0;
 nla_put_failure:
-       return -1;
+       security_release_secctx(secctx, len);
+       return ret;
 }
 #else
-#define ctnetlink_dump_secmark(a, b) (0)
+#define ctnetlink_dump_secctx(a, b) (0)
 #endif
 
 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -391,7 +407,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
            ctnetlink_dump_protoinfo(skb, ct) < 0 ||
            ctnetlink_dump_helpinfo(skb, ct) < 0 ||
            ctnetlink_dump_mark(skb, ct) < 0 ||
-           ctnetlink_dump_secmark(skb, ct) < 0 ||
+           ctnetlink_dump_secctx(skb, ct) < 0 ||
            ctnetlink_dump_id(skb, ct) < 0 ||
            ctnetlink_dump_use(skb, ct) < 0 ||
            ctnetlink_dump_master(skb, ct) < 0 ||
@@ -437,6 +453,17 @@ ctnetlink_counters_size(const struct nf_conn *ct)
               ;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct)
+{
+       int len;
+
+       security_secid_to_secctx(ct->secmark, NULL, &len);
+
+       return sizeof(char) * len;
+}
+#endif
+
 static inline size_t
 ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
@@ -453,7 +480,8 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
               + nla_total_size(0) /* CTA_HELP */
               + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
-              + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
+              + nla_total_size(0) /* CTA_SECCTX */
+              + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */
 #endif
 #ifdef CONFIG_NF_NAT_NEEDED
               + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
@@ -556,7 +584,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
                if ((events & (1 << IPCT_SECMARK) || ct->secmark)
-                   && ctnetlink_dump_secmark(skb, ct) < 0)
+                   && ctnetlink_dump_secctx(skb, ct) < 0)
                        goto nla_put_failure;
 #endif
 
index 5886ba1d52a0c353a2538313c717328cacfdcac3..ed6d929580236c1b4aa77a42db959c9e522f2fc5 100644 (file)
@@ -28,8 +28,8 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
-static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
+static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
+struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_l3protos);
 
 static DEFINE_MUTEX(nf_ct_proto_mutex);
index 53d892210a049363fa3ab04f6e17b04fa9969bbe..f64de95448669242cf23ac5d8a38987c2f8cd344 100644 (file)
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        unsigned int msglen, origlen;
        const char *dptr, *end;
        s16 diff, tdiff = 0;
-       int ret;
+       int ret = NF_ACCEPT;
        typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
        if (ctinfo != IP_CT_ESTABLISHED &&
index eb973fcd67ab4273a3cdee566a5f4a4fb44a24f2..0fb65705b44b522e3ba4de6c02d06f119f43f2d9 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/percpu.h>
 #include <linux/netdevice.h>
+#include <linux/security.h>
 #include <net/net_namespace.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
@@ -108,6 +109,29 @@ static void ct_seq_stop(struct seq_file *s, void *v)
        rcu_read_unlock();
 }
 
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+       int ret;
+       u32 len;
+       char *secctx;
+
+       ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+       if (ret)
+               return ret;
+
+       ret = seq_printf(s, "secctx=%s ", secctx);
+
+       security_release_secctx(secctx, len);
+       return ret;
+}
+#else
+static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+       return 0;
+}
+#endif
+
 /* return 0 on success, 1 in case of error */
 static int ct_seq_show(struct seq_file *s, void *v)
 {
@@ -168,10 +192,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
                goto release;
 #endif
 
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
-       if (seq_printf(s, "secmark=%u ", ct->secmark))
+       if (ct_show_secctx(s, ct))
                goto release;
-#endif
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
        if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
index 7df37fd786bc19406a5ed8a8558df95cd26d93be..b07393eab88e2fb86a21d7556f7ce532c807a172 100644 (file)
@@ -16,7 +16,7 @@
 #define NF_LOG_PREFIXLEN               128
 #define NFLOGGER_NAME_LEN              64
 
-static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
 static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
 static DEFINE_MUTEX(nf_log_mutex);
 
index 78b3cf9c519ca86e66b0ba4ae88c6797a7518c3f..74aebed5bd28bb5c0c924cec7d908615b82ffdd6 100644 (file)
@@ -18,7 +18,7 @@
  * long term mutex.  The handler must provide an outfn() to accept packets
  * for queueing and must reinject all packets it receives, no matter what.
  */
-static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
 
 static DEFINE_MUTEX(queue_handler_mutex);
 
index 5490fc37c92dfa5363a2992fd67fd4f145f65360..daab8c4a903ca20103c1c5d6ebe997fdb657f581 100644 (file)
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
 int
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-       if (inet_sk(sk)->transparent) {
+       bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
+                               inet_twsk(sk)->tw_transparent :
+                               inet_sk(sk)->transparent;
+
+       if (transparent) {
                skb_orphan(skb);
                skb->sk = sk;
                skb->destructor = nf_tproxy_destructor;
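
Sockets in TIME_WAIT are represented by a different, much smaller structure,
so inet_sk(sk)->transparent dereferences the wrong layout for them; the state
has to be checked first and the matching accessor used. A toy version of that
state-based dispatch (the structs here are invented for illustration):

#include <stdio.h>

enum state { ESTABLISHED, TIME_WAIT };

/* Two representations sharing only a leading state field, loosely
 * mirroring struct sock vs. the slimmer timewait sock. */
struct full_sock { enum state st; int transparent; /* ...much more... */ };
struct tw_sock   { enum state st; int tw_transparent; };

static int is_transparent(void *sk)
{
        enum state st = *(enum state *)sk;

        return st == TIME_WAIT ?
                ((struct tw_sock *)sk)->tw_transparent :
                ((struct full_sock *)sk)->transparent;
}

int main(void)
{
        struct full_sock f = { ESTABLISHED, 1 };
        struct tw_sock   t = { TIME_WAIT,   1 };

        printf("%d %d\n", is_transparent(&f), is_transparent(&t)); /* 1 1 */
        return 0;
}
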
index 0cb6053f02fdf04723254bfe90d8ca9bff29edfc..782e51986a6f670ce6c033c1a1d99e5a26d46885 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/skbuff.h>
-#include <linux/selinux.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <linux/netfilter/x_tables.h>
index 23b2d6c486b573927dcefd00b575546b35376bfa..9faf5e050b796186b3204a02ece181726a26cb1a 100644 (file)
@@ -14,8 +14,8 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
+#include <linux/security.h>
 #include <linux/skbuff.h>
-#include <linux/selinux.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_SECMARK.h>
 
@@ -39,9 +39,8 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 
        switch (mode) {
        case SECMARK_MODE_SEL:
-               secmark = info->u.sel.selsid;
+               secmark = info->secid;
                break;
-
        default:
                BUG();
        }
@@ -50,33 +49,33 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static int checkentry_selinux(struct xt_secmark_target_info *info)
+static int checkentry_lsm(struct xt_secmark_target_info *info)
 {
        int err;
-       struct xt_secmark_target_selinux_info *sel = &info->u.sel;
 
-       sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';
+       info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
+       info->secid = 0;
 
-       err = selinux_string_to_sid(sel->selctx, &sel->selsid);
+       err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
+                                      &info->secid);
        if (err) {
                if (err == -EINVAL)
-                       pr_info("invalid SELinux context \'%s\'\n",
-                               sel->selctx);
+                       pr_info("invalid security context \'%s\'\n", info->secctx);
                return err;
        }
 
-       if (!sel->selsid) {
-               pr_info("unable to map SELinux context \'%s\'\n", sel->selctx);
+       if (!info->secid) {
+               pr_info("unable to map security context \'%s\'\n", info->secctx);
                return -ENOENT;
        }
 
-       err = selinux_secmark_relabel_packet_permission(sel->selsid);
+       err = security_secmark_relabel_packet(info->secid);
        if (err) {
                pr_info("unable to obtain relabeling permission\n");
                return err;
        }
 
-       selinux_secmark_refcount_inc();
+       security_secmark_refcount_inc();
        return 0;
 }
 
@@ -100,16 +99,16 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
 
        switch (info->mode) {
        case SECMARK_MODE_SEL:
-               err = checkentry_selinux(info);
-               if (err <= 0)
-                       return err;
                break;
-
        default:
                pr_info("invalid mode: %hu\n", info->mode);
                return -EINVAL;
        }
 
+       err = checkentry_lsm(info);
+       if (err)
+               return err;
+
        if (!mode)
                mode = info->mode;
        return 0;
@@ -119,7 +118,7 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
 {
        switch (mode) {
        case SECMARK_MODE_SEL:
-               selinux_secmark_refcount_dec();
+               security_secmark_refcount_dec();
        }
 }
 
index b2a3ae6cad78e28324e23b857dc0a5773f569786..15003021f4f0a8706e540150425b4f995dc582a6 100644 (file)
@@ -225,12 +225,13 @@ static void pipe_grant_credits(struct sock *sk)
 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 {
        struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *hdr = pnp_hdr(skb);
+       struct pnpipehdr *hdr;
        int wake = 0;
 
        if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
 
+       hdr = pnp_hdr(skb);
        if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
                LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
                                (unsigned)hdr->data[0]);
index 595a952d4b17f069c60a457701d6e207f68e621b..1dfbfea12e9bc82ba8482277b4331289effa275b 100644 (file)
@@ -57,30 +57,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
        unsigned long ret;
        void *addr;
 
-       if (to_user)
+       addr = kmap(page);
+       if (to_user) {
                rds_stats_add(s_copy_to_user, bytes);
-       else
+               ret = copy_to_user(ptr, addr + offset, bytes);
+       } else {
                rds_stats_add(s_copy_from_user, bytes);
-
-       addr = kmap_atomic(page, KM_USER0);
-       if (to_user)
-               ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
-       else
-               ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
-       kunmap_atomic(addr, KM_USER0);
-
-       if (ret) {
-               addr = kmap(page);
-               if (to_user)
-                       ret = copy_to_user(ptr, addr + offset, bytes);
-               else
-                       ret = copy_from_user(addr + offset, ptr, bytes);
-               kunmap(page);
-               if (ret)
-                       return -EFAULT;
+               ret = copy_from_user(addr + offset, ptr, bytes);
        }
+       kunmap(page);
 
-       return 0;
+       return ret ? -EFAULT : 0;
 }
 EXPORT_SYMBOL_GPL(rds_page_copy_user);
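
The removed fast path did an atomic kmap plus __copy_*_inatomic and kept a
sleeping fallback for when the atomic copy faulted; since this caller may
sleep anyway, the fallback can simply become the only path. What remains is
the usual convention that the copy helpers return the number of bytes left
uncopied, which the function maps to -EFAULT. A toy model of that convention:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns how many bytes it could NOT copy,
 * 0 on complete success -- the same convention as the kernel helpers. */
static unsigned long toy_copy(char *dst, const char *src,
                              unsigned long n, unsigned long copyable)
{
        unsigned long done = n < copyable ? n : copyable;

        memcpy(dst, src, done);
        return n - done;
}

int main(void)
{
        char dst[16];

        printf("%d\n", toy_copy(dst, "hello", 5, 16) ? -EFAULT : 0); /* 0 */
        printf("%d\n", toy_copy(dst, "hello", 5, 3) ? -EFAULT : 0); /* -14 */
        return 0;
}
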
 
index c397524c039cdb28140ff5f6c0fe2359cebfa3b5..c519939e8da98fd3ae8252355a0ebb9efac4acd5 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 975183fe6950a34b242ef55db011e6f04c1c6772..27844f231d103a4e49e542d670eaca66d01e761d 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (ready == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1aba6878fa5dc42d4c54473350fde61d714a184c..e43797404102efcc2ed45117456e839b5425adb1 100644 (file)
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index a28b895ff0d10194730463b218e3ccb526cdea50..2f012a07d94d16d3aa01d4c78d0894e939dfd751 100644 (file)
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
index 8e45e76a95f51cdca332263ee7e32108f0beb95c..d952e7eac18867501a0bf7d2034c621c2fd08df7 100644 (file)
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
        if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
                return -EINVAL;
 
-       if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+       if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
                return -EINVAL;
 
        /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
index 78ef2c5e130ba9224512dd3872d86226b36306f3..37dff78e9cb17c6fcf35e5302d57b84dd5e9f5f9 100644 (file)
@@ -123,7 +123,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
         * calls by looking at the number of nested bh disable calls because
         * softirqs always disables bh.
         */
-       if (softirq_count() != SOFTIRQ_OFFSET) {
+       if (in_serving_softirq()) {
                /* If there is an sk_classid we'll use that. */
                if (!skb->sk)
                        return -1;
index 7416a5c73b2a993550991ac66eca7cc254c6f2e6..b0c2a82178afa032ce1d09b0e9f400afb2b578f5 100644 (file)
@@ -137,7 +137,7 @@ next_knode:
                        int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, _data;
 
-                       if (skb_headroom(skb) + toff < 0)
+                       if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;
 
                        data = skb_header_pointer(skb, toff, 4, &_data);
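
Two flavours of signed/unsigned trouble are fixed in the hunks above. In
cls_u32, skb_headroom() is unsigned, so headroom + toff is computed in
unsigned arithmetic and the old "< 0" test could never fire; a toff negative
enough to point before the buffer instead wraps to a huge value, which the
"> INT_MAX" test catches. In rose, a negative digi count sailed past a signed
comparison against the limit until the cast made it unsigned. Both traps in a
few lines:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int headroom = 64; /* skb_headroom() is unsigned */
        int toff = -100;            /* offset pointing before the buffer */

        /* The sum is computed as unsigned, so "< 0" is always false... */
        printf("sum < 0:       %d\n", headroom + toff < 0);       /* 0 */
        /* ...but the wrapped value is enormous, and this fires. */
        printf("sum > INT_MAX: %d\n", headroom + toff > INT_MAX); /* 1 */

        int ndigis = -1;            /* user-supplied digi count */
        printf("signed:   %d\n", ndigis > 6);                     /* 0 */
        printf("unsigned: %d\n", (unsigned int)ndigis > 6);       /* 1 */
        return 0;
}
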
index 3406627895298324fdd9d27186ad8c9c8d9a9964..6318e1136b83de86f1b73150a0a5199522293f4f 100644 (file)
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                        error = -EINVAL;
                        goto err_out;
                }
-               if (!list_empty(&flow->list)) {
-                       error = -EEXIST;
-                       goto err_out;
-               }
        } else {
                int i;
                unsigned long cl;
index 86366390038a1cae0b536874fd037464f2a1678f..ddbbf7c81fa1d62adf50600b6c788d6769ff5d32 100644 (file)
@@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
                id = ntohs(hmacs->hmac_ids[i]);
 
                /* Check the id is in the supported range */
-               if (id > SCTP_AUTH_HMAC_ID_MAX)
+               if (id > SCTP_AUTH_HMAC_ID_MAX) {
+                       id = 0;
                        continue;
+               }
 
                /* See if we support the id.  Supported IDs have name and
                 * length fields set, so that we can allocate and use
                 * them.  We can safely just check for name, for without the
                 * name, we can't allocate the TFM.
                 */
-               if (!sctp_hmac_list[id].hmac_name)
+               if (!sctp_hmac_list[id].hmac_name) {
+                       id = 0;
                        continue;
+               }
 
                break;
        }
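
The subtlety: id doubles as the loop's result, indexing sctp_hmac_list after
the loop ends. Before the fix, a continue on the final iteration left id
holding the rejected value; resetting it on every skip guarantees that
exhausting the list yields the "nothing usable" sentinel. The idiom in
isolation, with invented tables:

#include <stdio.h>

static const char *names[] = { NULL, "sha1", NULL, "sha256" };
#define ID_MAX 3

/* Pick the first peer-offered id we support; 0 means "none". */
static int pick(const int *offered, int n)
{
        int id = 0, i;

        for (i = 0; i < n; i++) {
                id = offered[i];
                if (id < 0 || id > ID_MAX) {
                        id = 0;   /* reset, or a bad id could leak out */
                        continue;
                }
                if (!names[id]) {
                        id = 0;
                        continue;
                }
                break;
        }
        return id;
}

int main(void)
{
        int bad[]  = { 7, 2 };  /* out of range, then unsupported */
        int good[] = { 7, 3 };

        printf("%d\n", pick(bad, 2));  /* 0: nothing usable */
        printf("%d\n", pick(good, 2)); /* 3: sha256 */
        return 0;
}
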
index a646681f5acdffe30cd7b5b2f06c3bbf413609a6..bcc4590ccaf21bb988a7827614f71a39ffa31318 100644 (file)
@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
        SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
                          packet, vtag);
 
-       sctp_packet_reset(packet);
        packet->vtag = vtag;
 
        if (ecn_capable && sctp_packet_empty(packet)) {
index 24b2cd55563726e8cd24be5dfbfafe8eb2892886..d344dc481ccc7a22d517c13995e825a73bed21ce 100644 (file)
@@ -1232,6 +1232,18 @@ out:
        return 0;
 }
 
+static bool list_has_sctp_addr(const struct list_head *list,
+                              union sctp_addr *ipaddr)
+{
+       struct sctp_transport *addr;
+
+       list_for_each_entry(addr, list, transports) {
+               if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+                       return true;
+       }
+
+       return false;
+}
 /* A restart is occurring, check to make sure no new addresses
  * are being added as we may be under a takeover attack.
  */
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                                       struct sctp_chunk *init,
                                       sctp_cmd_seq_t *commands)
 {
-       struct sctp_transport *new_addr, *addr;
-       int found;
+       struct sctp_transport *new_addr;
+       int ret = 1;
 
-       /* Implementor's Guide - Sectin 5.2.2
+       /* Implementor's Guide - Section 5.2.2
         * ...
         * Before responding the endpoint MUST check to see if the
         * unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
        /* Search through all current addresses and make sure
         * we aren't adding any new ones.
         */
-       new_addr = NULL;
-       found = 0;
-
        list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
-                       transports) {
-               found = 0;
-               list_for_each_entry(addr, &asoc->peer.transport_addr_list,
-                               transports) {
-                       if (sctp_cmp_addr_exact(&new_addr->ipaddr,
-                                               &addr->ipaddr)) {
-                               found = 1;
-                               break;
-                       }
-               }
-               if (!found)
+                           transports) {
+               if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+                                       &new_addr->ipaddr)) {
+                       sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+                                                  commands);
+                       ret = 0;
                        break;
-       }
-
-       /* If a new address was added, ABORT the sender. */
-       if (!found && new_addr) {
-               sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
+               }
        }
 
        /* Return success if all addresses were found. */
-       return found;
+       return ret;
 }
 
 /* Populate the verification/tie tags based on overlapping INIT
index ca44917872d2553b98f4e2f3601f95fe0f2c414f..fbb70770ad05d05807d25b5527e5616a621f58e8 100644 (file)
@@ -916,6 +916,11 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
+               if (walk_size + sizeof(sa_family_t) > addrs_size) {
+                       kfree(kaddrs);
+                       return -EINVAL;
+               }
+
                sa_addr = (struct sockaddr *)addr_buf;
                af = sctp_get_af_specific(sa_addr->sa_family);
 
@@ -1002,9 +1007,13 @@ static int __sctp_connect(struct sock* sk,
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
+               if (walk_size + sizeof(sa_family_t) > addrs_size) {
+                       err = -EINVAL;
+                       goto out_free;
+               }
+
                sa_addr = (union sctp_addr *)addr_buf;
                af = sctp_get_af_specific(sa_addr->sa.sa_family);
-               port = ntohs(sa_addr->v4.sin_port);
 
                /* If the address family is not supported or if this address
                 * causes the address buffer to overflow return EINVAL.
@@ -1014,6 +1023,8 @@ static int __sctp_connect(struct sock* sk,
                        goto out_free;
                }
 
+               port = ntohs(sa_addr->v4.sin_port);
+
                /* Save current address so we can work with it */
                memcpy(&to, sa_addr, af->sockaddr_len);
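
Both walkers (bindx and connect) read sa_family from the user buffer before
verifying that the remaining bytes can hold even that field, so a trailing
partial record meant an out-of-bounds read; bounding walk_size +
sizeof(sa_family_t) first closes it. A toy walker over variable-length
records with the same guard (the record format is invented for illustration):

#include <stdio.h>
#include <string.h>

/* Toy record: a 2-byte length header, then that many payload bytes. */
static int walk(const unsigned char *buf, size_t size)
{
        size_t off = 0;

        while (off < size) {
                unsigned short len;

                /* The fix: prove the header itself is in bounds
                 * before reading it. */
                if (off + sizeof(len) > size)
                        return -1;
                memcpy(&len, buf + off, sizeof(len));
                if (off + sizeof(len) + len > size)
                        return -1;
                off += sizeof(len) + len;
        }
        return 0;
}

int main(void)
{
        unsigned char buf[16] = { 0 };
        unsigned short len = 4;
        size_t n = sizeof(len) + len;

        memcpy(buf, &len, sizeof(len));   /* one well-formed record */
        printf("%d\n", walk(buf, n));     /*  0: clean walk */
        printf("%d\n", walk(buf, n + 1)); /* -1: trailing partial header */
        return 0;
}
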
 
index 36cb66022a279e96869de77c3c9cebbbe9824ff4..e9eaaf7d43c18104167692f801794bdacf89b97e 100644 (file)
@@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
 static LIST_HEAD(cred_unused);
 static unsigned long number_cred_unused;
 
-#define MAX_HASHTABLE_BITS (10) 
+#define MAX_HASHTABLE_BITS (14)
 static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
 {
        unsigned long num;
index dcfc66bab2bb16f9872aca4adc6b258b55afe05a..12c4859828146cbdff7a0e9c38a039eaa3395426 100644 (file)
@@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode)
        struct rpc_inode *rpci = RPC_I(inode);
        struct gss_upcall_msg *gss_msg;
 
+restart:
        spin_lock(&inode->i_lock);
-       while (!list_empty(&rpci->in_downcall)) {
+       list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
 
-               gss_msg = list_entry(rpci->in_downcall.next,
-                               struct gss_upcall_msg, list);
+               if (!list_empty(&gss_msg->msg.list))
+                       continue;
                gss_msg->msg.errno = -EPIPE;
                atomic_inc(&gss_msg->count);
                __gss_unhash_msg(gss_msg);
                spin_unlock(&inode->i_lock);
                gss_release_msg(gss_msg);
-               spin_lock(&inode->i_lock);
+               goto restart;
        }
        spin_unlock(&inode->i_lock);
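
list_for_each_entry() is only safe while the lock guarding the list is held;
the old while loop dropped i_lock in the body and then kept walking a list
that could have changed underneath it. Restarting from the head after every
unlock, and skipping entries already handled (here, those whose msg.list is
non-empty), is the standard escape. The pattern in miniature:

#include <pthread.h>
#include <stdio.h>

struct node { int done; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void release(struct node *n) { n->done = 1; /* may sleep, etc. */ }

static void drain(void)
{
        struct node *n;
restart:
        pthread_mutex_lock(&lock);
        for (n = head; n; n = n->next) {
                if (n->done)
                        continue;         /* already handled: skip */
                pthread_mutex_unlock(&lock);
                release(n);               /* can't hold the lock here */
                goto restart;             /* list may have changed: rescan */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct node c = { 0, NULL }, b = { 0, &c }, a = { 0, &b };

        head = &a;
        drain();
        printf("%d %d %d\n", a.done, b.done, c.done); /* 1 1 1 */
        return 0;
}
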
 
index 032644610524306ea0e01383b3c4ea54888b10ab..778e5dfc5144910f83609b8bf48ca2a35011110d 100644 (file)
@@ -237,6 +237,7 @@ get_key(const void *p, const void *end,
        if (!supported_gss_krb5_enctype(alg)) {
                printk(KERN_WARNING "gss_kerberos_mech: unsupported "
                        "encryption key algorithm %d\n", alg);
+               p = ERR_PTR(-EINVAL);
                goto out_err;
        }
        p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
        ctx->enctype = ENCTYPE_DES_CBC_RAW;
 
        ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
-       if (ctx->gk5e == NULL)
+       if (ctx->gk5e == NULL) {
+               p = ERR_PTR(-EINVAL);
                goto out_err;
+       }
 
        /* The downcall format was designed before we completely understood
         * the uses of the context fields; so it includes some stuff we
         * just give some minimal sanity-checking, and some we ignore
         * completely (like the next twenty bytes): */
-       if (unlikely(p + 20 > end || p + 20 < p))
+       if (unlikely(p + 20 > end || p + 20 < p)) {
+               p = ERR_PTR(-EFAULT);
                goto out_err;
+       }
        p += 20;
        p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
        if (IS_ERR(p))
@@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
        if (ctx->seq_send64 != ctx->seq_send) {
                dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
                        (long unsigned)ctx->seq_send64, ctx->seq_send);
+               p = ERR_PTR(-EINVAL);
                goto out_err;
        }
        p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
index dc3f1f5ed8654da469bd477803eb3530ea9659b7..adade3d313f279bde98674e2e7ed21496267f9e7 100644 (file)
@@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
        if (version != 1) {
                dprintk("RPC:       unknown spkm3 token format: "
                                "obsolete nfs-utils?\n");
+               p = ERR_PTR(-EINVAL);
                goto out_err_free_ctx;
        }
 
@@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
        if (IS_ERR(p))
                goto out_err_free_intg_alg;
 
-       if (p != end)
+       if (p != end) {
+               p = ERR_PTR(-EFAULT);
                goto out_err_free_intg_key;
+       }
 
        ctx_id->internal_ctx_id = ctx;
 
index 2388d83b68ff75dc4644d1b5808ef19224cd469b..fa5549079d79ca7709d3a68bc686b77f10b08243 100644 (file)
@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
                        goto out_no_principal;
        }
 
-       kref_init(&clnt->cl_kref);
+       atomic_set(&clnt->cl_count, 1);
 
        err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
        if (err < 0)
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt)
                if (new->cl_principal == NULL)
                        goto out_no_principal;
        }
-       kref_init(&new->cl_kref);
+       atomic_set(&new->cl_count, 1);
        err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
        if (err != 0)
                goto out_no_path;
        if (new->cl_auth)
                atomic_inc(&new->cl_auth->au_count);
        xprt_get(clnt->cl_xprt);
-       kref_get(&clnt->cl_kref);
+       atomic_inc(&clnt->cl_count);
        rpc_register_client(new);
        rpciod_up();
        return new;
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
  * Free an RPC client
  */
 static void
-rpc_free_client(struct kref *kref)
+rpc_free_client(struct rpc_clnt *clnt)
 {
-       struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
        dprintk("RPC:       destroying %s client for %s\n",
                        clnt->cl_protname, clnt->cl_server);
        if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@ out_free:
  * Free an RPC client
  */
 static void
-rpc_free_auth(struct kref *kref)
+rpc_free_auth(struct rpc_clnt *clnt)
 {
-       struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
        if (clnt->cl_auth == NULL) {
-               rpc_free_client(kref);
+               rpc_free_client(clnt);
                return;
        }
 
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref)
         *       release remaining GSS contexts. This mechanism ensures
         *       that it can do so safely.
         */
-       kref_init(kref);
+       atomic_inc(&clnt->cl_count);
        rpcauth_release(clnt->cl_auth);
        clnt->cl_auth = NULL;
-       kref_put(kref, rpc_free_client);
+       if (atomic_dec_and_test(&clnt->cl_count))
+               rpc_free_client(clnt);
 }
 
 /*
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt)
 
        if (list_empty(&clnt->cl_tasks))
                wake_up(&destroy_wait);
-       kref_put(&clnt->cl_kref, rpc_free_auth);
+       if (atomic_dec_and_test(&clnt->cl_count))
+               rpc_free_auth(clnt);
 }
 
 /**
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
        if (clnt != NULL) {
                rpc_task_release_client(task);
                task->tk_client = clnt;
-               kref_get(&clnt->cl_kref);
+               atomic_inc(&clnt->cl_count);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                /* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task)
        task->tk_status = 0;
        if (status >= 0) {
                if (task->tk_rqstp) {
-                       task->tk_action = call_allocate;
+                       task->tk_action = call_refresh;
                        return;
                }
 
@@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task)
 }
 
 /*
- * 2.  Allocate the buffer. For details, see sched.c:rpc_malloc.
+ * 2.  Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+       dprint_status(task);
+
+       task->tk_action = call_refreshresult;
+       task->tk_status = 0;
+       task->tk_client->cl_stats->rpcauthrefresh++;
+       rpcauth_refreshcred(task);
+}
+
+/*
+ * 2a. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+       int status = task->tk_status;
+
+       dprint_status(task);
+
+       task->tk_status = 0;
+       task->tk_action = call_allocate;
+       if (status >= 0 && rpcauth_uptodatecred(task))
+               return;
+       switch (status) {
+       case -EACCES:
+               rpc_exit(task, -EACCES);
+               return;
+       case -ENOMEM:
+               rpc_exit(task, -ENOMEM);
+               return;
+       case -ETIMEDOUT:
+               rpc_delay(task, 3*HZ);
+       }
+       task->tk_action = call_refresh;
+}
+
+/*
+ * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
  *     (Note: buffer memory is freed in xprt_release).
  */
 static void
 call_allocate(struct rpc_task *task)
 {
-       unsigned int slack = task->tk_client->cl_auth->au_cslack;
+       unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task)
        dprint_status(task);
 
        task->tk_status = 0;
-       task->tk_action = call_refresh;
+       task->tk_action = call_bind;
 
        if (req->rq_buffer)
                return;
@@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task)
        rpc_exit(task, -ERESTARTSYS);
 }
 
-/*
- * 2a. Bind and/or refresh the credentials
- */
-static void
-call_refresh(struct rpc_task *task)
-{
-       dprint_status(task);
-
-       task->tk_action = call_refreshresult;
-       task->tk_status = 0;
-       task->tk_client->cl_stats->rpcauthrefresh++;
-       rpcauth_refreshcred(task);
-}
-
-/*
- * 2b. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
-       int status = task->tk_status;
-
-       dprint_status(task);
-
-       task->tk_status = 0;
-       task->tk_action = call_bind;
-       if (status >= 0 && rpcauth_uptodatecred(task))
-               return;
-       switch (status) {
-       case -EACCES:
-               rpc_exit(task, -EACCES);
-               return;
-       case -ENOMEM:
-               rpc_exit(task, -ENOMEM);
-               return;
-       case -ETIMEDOUT:
-               rpc_delay(task, 3*HZ);
-       }
-       task->tk_action = call_refresh;
-}
-
 static inline int
 rpc_task_need_encode(struct rpc_task *task)
 {
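
The functional point of this shuffle is ordering: call_allocate now sizes the
request buffer from the bound credential (rq_cred->cr_auth->au_cslack,
above), so credentials must be refreshed before allocation rather than after.
Viewed as the RPC call state machine, only the successor edges change; a toy
rendering of the reordered steps:

#include <stdio.h>

enum step { RESERVE, REFRESH, ALLOCATE, BIND, DONE };

/* The fix rewires the successors so refresh precedes allocate. */
static enum step next(enum step s)
{
        switch (s) {
        case RESERVE:  return REFRESH;  /* was: ALLOCATE */
        case REFRESH:  return ALLOCATE; /* was: BIND */
        case ALLOCATE: return BIND;     /* was: REFRESH */
        case BIND:     return DONE;
        default:       return DONE;
        }
}

int main(void)
{
        static const char *name[] = {
                "reserve", "refresh", "allocate", "bind", "done"
        };

        for (enum step s = RESERVE; s != DONE; s = next(s))
                printf("%s -> %s\n", name[s], name[next(s)]);
        return 0;
}
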
index 95ccbcf45d3eb64c6ee84c4767c85e7c5a9ee694..8c8eef2b8f26a205bef37706b900c6936a492ed8 100644 (file)
@@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
                return;
        do {
                msg = list_entry(head->next, struct rpc_pipe_msg, list);
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
@@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
        if (msg != NULL) {
                spin_lock(&inode->i_lock);
                msg->errno = -EAGAIN;
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
        if (res < 0 || msg->len == msg->copied) {
                filp->private_data = NULL;
                spin_lock(&inode->i_lock);
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v)
 static int
 rpc_info_open(struct inode *inode, struct file *file)
 {
-       struct rpc_clnt *clnt;
+       struct rpc_clnt *clnt = NULL;
        int ret = single_open(file, rpc_show_info, NULL);
 
        if (!ret) {
                struct seq_file *m = file->private_data;
-               mutex_lock(&inode->i_mutex);
-               clnt = RPC_I(inode)->private;
-               if (clnt) {
-                       kref_get(&clnt->cl_kref);
+
+               spin_lock(&file->f_path.dentry->d_lock);
+               if (!d_unhashed(file->f_path.dentry))
+                       clnt = RPC_I(inode)->private;
+               if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+                       spin_unlock(&file->f_path.dentry->d_lock);
                        m->private = clnt;
                } else {
+                       spin_unlock(&file->f_path.dentry->d_lock);
                        single_release(inode, file);
                        ret = -EINVAL;
                }
-               mutex_unlock(&inode->i_mutex);
        }
        return ret;
 }
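
The kref-to-atomic conversion across this file exists to make the
rpc_info_open() lookup above legal: atomic_inc_not_zero() takes a reference
only if the count has not already dropped to zero, which is the safe way to
acquire an object found through a structure that can outlive it. A C11 sketch
of the primitive (not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (count > 0). */
static bool inc_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
                /* old was reloaded by the failed CAS; retry */
        }
        return false;
}

int main(void)
{
        atomic_int live = 1, dying = 0;

        printf("%d %d\n", inc_not_zero(&live), inc_not_zero(&dying)); /* 1 0 */
        return 0;
}
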
index b6309db5622689deaf1c76a6acb1f6a09a02602c..fe9306bf10cc7f3bba4590ddf853c2ac39eadae9 100644 (file)
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
        u32 _xid;
        __be32 *xp;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        dprintk("RPC:       xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
  dropit:
        skb_free_datagram(sk, skb);
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 
        dprintk("RPC:       xs_tcp_data_ready...\n");
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
                read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
        } while (read > 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
-               spin_lock_bh(&xprt->transport_lock);
+               spin_lock(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        struct sock_xprt *transport = container_of(xprt,
                                        struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
-               spin_unlock_bh(&xprt->transport_lock);
+               spin_unlock(&xprt->transport_lock);
                break;
        case TCP_FIN_WAIT1:
                /* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
                xs_sock_mark_closed(xprt);
        }
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
                        __func__, xprt, sk->sk_err);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
  */
 static void xs_udp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
  */
 static void xs_tcp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
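The conversion above repeats across every socket callback in this file: the outer sk_callback_lock reader now disables bottom halves itself (read_lock_bh), which in turn lets the nested transport_lock drop from spin_lock_bh() to plain spin_lock(). A minimal sketch of the resulting nesting, not part of the commit and with hypothetical lock names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(callback_lock);	/* stands in for sk->sk_callback_lock */
static DEFINE_SPINLOCK(transport_lock);	/* stands in for xprt->transport_lock */

static void example_callback(void)
{
	read_lock_bh(&callback_lock);	/* takes the lock and disables BHs */
	/*
	 * Bottom halves are already off here, so the inner lock no
	 * longer needs its _bh variant.
	 */
	spin_lock(&transport_lock);
	/* ... update transport state ... */
	spin_unlock(&transport_lock);
	read_unlock_bh(&callback_lock);
}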
index 4414a18c63b49601357789fb5d6df78326819ba7..0b39b2451ea59958c0819c51faf337d000cbb88d 100644 (file)
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock)
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
+       unsigned int retries = 0;
 
        mutex_lock(&u->readlock);
 
@@ -717,9 +718,17 @@ retry:
        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
-               /* Sanity yield. It is unusual case, but yet... */
-               if (!(ordernum&0xFF))
-                       yield();
+               /*
+                * __unix_find_socket_byname() may take a long time if many names
+                * are already in use.
+                */
+               cond_resched();
+               /* Give up if all names seem to be in use. */
+               if (retries++ == 0xFFFFF) {
+                       err = -ENOSPC;
+                       kfree(addr);
+                       goto out;
+               }
                goto retry;
        }
        addr->hash ^= sk->sk_type;
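This hunk replaces an unbounded retry loop (with an occasional yield()) by a bounded one: every failed probe calls cond_resched(), and after 0xFFFFF attempts the autobind fails with -ENOSPC rather than spinning indefinitely. A standalone sketch of the pattern, where try_claim_name() is a hypothetical stand-in for the locked table probe:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>

static bool try_claim_name(u32 ordernum);	/* hypothetical probe */

static int autobind_sketch(void)
{
	static u32 ordernum = 1;
	unsigned int retries = 0;

	while (!try_claim_name(ordernum++)) {
		cond_resched();		/* the table scan may take a long time */
		if (retries++ == 0xFFFFF)
			return -ENOSPC;	/* all names appear to be in use */
	}
	return 0;
}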
index 3feb28e41c5347b85175f57daf223918620723b2..674d426a9d24f9aab7657d1e8ecf342e3be87438 100644 (file)
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
        } else if (!iwp->pointer)
                return -EFAULT;
 
-       extra = kmalloc(extra_size, GFP_KERNEL);
+       extra = kzalloc(extra_size, GFP_KERNEL);
        if (!extra)
                return -ENOMEM;
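The one-word change is a hardening fix: extra is later copied back to user space, and with kzalloc() any bytes the handler never writes are zero instead of stale kernel heap contents. The rule of thumb, as a hedged sketch:

#include <linux/slab.h>

/* Prefer a zeroed allocation for any buffer that may reach user space
 * only partially written; kzalloc() is kmalloc() plus zero-fill. */
static void *alloc_for_user_copy(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}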
 
index a3cca0a94346319dec462ef7ef2fb7df2265945e..64f2ae1fdc15e2a63a28fc7073fc0e1f957cee44 100644 (file)
@@ -101,7 +101,7 @@ resume:
                        err = -EHOSTUNREACH;
                        goto error_nolock;
                }
-               skb_dst_set_noref(skb, dst);
+               skb_dst_set(skb, dst_clone(dst));
                x = dst->xfrm;
        } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
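Reverting skb_dst_set_noref() to skb_dst_set(skb, dst_clone(dst)) trades the refcount-free shortcut for an owned reference: a noref dst is only valid inside the RCU read-side section that produced it, and here the skb can outlive that section. A hypothetical wrapper contrasting the two attachment modes (not from the commit):

#include <linux/skbuff.h>
#include <net/dst.h>

static void attach_dst(struct sk_buff *skb, struct dst_entry *dst,
		       bool skb_may_outlive_rcu_section)
{
	if (skb_may_outlive_rcu_section)
		skb_dst_set(skb, dst_clone(dst));	/* own a reference */
	else
		skb_dst_set_noref(skb, dst);		/* borrow under RCU */
}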
 
index 2b3ed7ad49338f3ec2d64caf1dd589c2aefc4264..cbab6e1a8c9c4043fcfdba5bdc17c5e9d5dea45c 100644 (file)
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
                    tmpl->mode == XFRM_MODE_BEET) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
-                       family = tmpl->encap_family;
-                       if (xfrm_addr_any(local, family)) {
-                               error = xfrm_get_saddr(net, &tmp, remote, family);
+                       if (xfrm_addr_any(local, tmpl->encap_family)) {
+                               error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
                                if (error)
                                        goto fail;
                                local = &tmp;
index 5208b12fbfb4942d4142f79ddb41ccb1420a4c93..eb96ce52f1789dd881116e76a08169189b50f02c 100644 (file)
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
 static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-                 struct xfrm_tmpl *tmpl,
-                 xfrm_address_t *daddr, xfrm_address_t *saddr,
-                 unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+                   struct xfrm_tmpl *tmpl,
+                   xfrm_address_t *daddr, xfrm_address_t *saddr,
+                   unsigned short family)
 {
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
-       afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+       afinfo->init_tempsel(&x->sel, fl);
+
+       if (family != tmpl->encap_family) {
+               xfrm_state_put_afinfo(afinfo);
+               afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+               if (!afinfo)
+                       return -1;
+       }
+       afinfo->init_temprop(x, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
 }
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
        int error = 0;
        struct xfrm_state *best = NULL;
        u32 mark = pol->mark.v & pol->mark.m;
+       unsigned short encap_family = tmpl->encap_family;
 
        to_put = NULL;
 
        spin_lock_bh(&xfrm_state_lock);
-       h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+       h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
        hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
-               if (x->props.family == family &&
+               if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, family) &&
+                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
                                           &best, &acquire_in_progress, &error);
        }
        if (best)
                goto found;
 
-       h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+       h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
        hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
-               if (x->props.family == family &&
+               if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, family) &&
+                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-                       xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+                       xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
                                           &best, &acquire_in_progress, &error);
        }
 
@@ -829,7 +838,7 @@ found:
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
                    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
-                                             tmpl->id.proto, family)) != NULL) {
+                                             tmpl->id.proto, encap_family)) != NULL) {
                        to_put = x0;
                        error = -EEXIST;
                        goto out;
@@ -839,9 +848,9 @@ found:
                        error = -ENOMEM;
                        goto out;
                }
-               /* Initialize temporary selector matching only
+               /* Initialize temporary state matching only
                 * to current session. */
-               xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+               xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
                memcpy(&x->mark, &pol->mark, sizeof(x->mark));
 
                error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
                        x->km.state = XFRM_STATE_ACQ;
                        list_add(&x->km.all, &net->xfrm.state_all);
                        hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
-                       h = xfrm_src_hash(net, daddr, saddr, family);
+                       h = xfrm_src_hash(net, daddr, saddr, encap_family);
                        hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
                        if (x->id.spi) {
-                               h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+                               h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
                                hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
                        }
                        x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
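Taken together, these hunks make the acquire path aware of inter-family tunnels: the temporary state's selector is still built from the flow's family, while its addresses, hash chains and lookups now use tmpl->encap_family, fetching a second per-family afinfo table when the two differ. A reduced sketch of that two-table pattern, with hypothetical ops structures standing in for xfrm_state_afinfo:

struct family_ops {
	void (*init_sel)(void);		/* selector: the flow's family */
	void (*init_addrs)(void);	/* addresses: the encap family */
};

/* hypothetical get/put pair mirroring xfrm_state_get_afinfo() */
struct family_ops *get_ops(unsigned short family);
void put_ops(struct family_ops *ops);

static int init_tempstate_sketch(unsigned short family,
				 unsigned short encap_family)
{
	struct family_ops *ops = get_ops(family);

	if (!ops)
		return -1;
	ops->init_sel();
	if (family != encap_family) {	/* e.g. an IPv6-over-IPv4 tunnel */
		put_ops(ops);
		ops = get_ops(encap_family);
		if (!ops)
			return -1;
	}
	ops->init_addrs();
	put_ops(ops);
	return 0;
}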
index ee03a4f0b64f4361af8c850b3123b8d457bec0bf..06473791c08adb7c5b0a7080ea9600d927c09d94 100644 (file)
@@ -24,6 +24,7 @@ static int __init example_init(void)
 {
        int                     i;
        unsigned int            ret;
+       unsigned int            nents;
        struct scatterlist      sg[10];
 
        printk(KERN_INFO "DMA fifo test start\n");
@@ -61,9 +62,9 @@ static int __init example_init(void)
         * byte at the beginning, after the kfifo_skip().
         */
        sg_init_table(sg, ARRAY_SIZE(sg));
-       ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* fifo is full and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
                return -EIO;
@@ -71,7 +72,7 @@ static int __init example_init(void)
 
        /* receive data */
        printk(KERN_INFO "scatterlist for receive:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
@@ -91,16 +92,16 @@ static int __init example_init(void)
        kfifo_dma_in_finish(&fifo, ret);
 
        /* Prepare to transmit data, example: 8 bytes */
-       ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* no data was available and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
                return -EIO;
        }
 
        printk(KERN_INFO "scatterlist for transmit:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
index 842dbc2d5aeda0af27ce18af9914deea370dc329..2e088109fbd5238f3e4f6d293848606d0ac95a58 100644 (file)
@@ -11,6 +11,7 @@ hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_LOGO)         += pnmtologo
 hostprogs-$(CONFIG_VT)           += conmakehash
 hostprogs-$(CONFIG_IKCONFIG)     += bin2c
+hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
 
 always         := $(hostprogs-y) $(hostprogs-m)
 
index a1a5cf95a68d73bd5179474e07b274f77eb5efaf..843bd4f4ffc931face60ece3907743ba766a1b08 100644 (file)
@@ -209,12 +209,22 @@ cmd_modversions =                                                         \
 endif
 
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ifdef BUILD_C_RECORDMCOUNT
+# Due to recursion, we must skip empty.o.
+# The empty.o file is created in the make process in order to determine
+#  the target endianness and word size. It is made before all other C
+#  files, including recordmcount.
+cmd_record_mcount = if [ $(@) != "scripts/mod/empty.o" ]; then                 \
+                       $(objtree)/scripts/recordmcount "$(@)";                 \
+                   fi;
+else
 cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
        "$(if $(CONFIG_64BIT),64,32)" \
        "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 endif
+endif
 
 define rule_cc_o_c
        $(call echo-cmd,checksrc) $(cmd_checksrc)                         \
index 54fd1b700131e1e1fcb0ddd13d89ee3a06983ecb..7bfcf1a09ac599be43a125ddb5cabf17d3a37e65 100644 (file)
@@ -101,14 +101,6 @@ basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
 
-#hash values
-ifdef CONFIG_DYNAMIC_DEBUG
-debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
-              -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
-else
-debug_flags =
-endif
-
 orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
@@ -152,8 +144,7 @@ endif
 
 c_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
                 $(__c_flags) $(modkern_cflags)                           \
-                -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \
-                 $(debug_flags)
+                -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
 
 a_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
                 $(__a_flags) $(modkern_aflags)
index 09559951df1208e00f5a51efa1208145a87efe14..4c324a1f1e0efb8668b4d64e05b56e8e0f64f25b 100644 (file)
@@ -9,7 +9,7 @@
 # fixdep:       Used to generate dependency information during build process
 # docproc:      Used in Documentation/DocBook
 
-hostprogs-y    := fixdep docproc hash
+hostprogs-y    := fixdep docproc
 always         := $(hostprogs-y)
 
 # fixdep is needed to compile other host programs
index 79ab973fb43a42715d61092ce58d7408109e6eff..fc3b18d844af848a415c0e1a98024f5b097b9ead 100644 (file)
  *
  */
 
+#define _GNU_SOURCE
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <ctype.h>
 #include <unistd.h>
 #include <limits.h>
+#include <errno.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
@@ -54,6 +56,7 @@ typedef void FILEONLY(char * file);
 FILEONLY *internalfunctions;
 FILEONLY *externalfunctions;
 FILEONLY *symbolsonly;
+FILEONLY *findall;
 
 typedef void FILELINE(char * file, char * line);
 FILELINE * singlefunctions;
@@ -65,12 +68,30 @@ FILELINE * docsection;
 #define KERNELDOCPATH "scripts/"
 #define KERNELDOC     "kernel-doc"
 #define DOCBOOK       "-docbook"
+#define LIST          "-list"
 #define FUNCTION      "-function"
 #define NOFUNCTION    "-nofunction"
 #define NODOCSECTIONS "-no-doc-sections"
 
 static char *srctree, *kernsrctree;
 
+static char **all_list = NULL;
+static int all_list_len = 0;
+
+static void consume_symbol(const char *sym)
+{
+       int i;
+
+       for (i = 0; i < all_list_len; i++) {
+               if (!all_list[i])
+                       continue;
+               if (strcmp(sym, all_list[i]))
+                       continue;
+               all_list[i] = NULL;
+               break;
+       }
+}
+
 static void usage (void)
 {
        fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type)
                struct symfile * sym = &symfilelist[i];
                for (j=0; j < sym->symbolcnt; j++) {
                        vec[idx++]     = type;
+                       consume_symbol(sym->symbollist[j].name);
                        vec[idx++] = sym->symbollist[j].name;
                }
        }
@@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line)
                         vec[idx++] = &line[i];
                 }
         }
+       for (i = 0; i < idx; i++) {
+               if (strcmp(vec[i], FUNCTION))
+                       continue;
+               consume_symbol(vec[i + 1]);
+       }
        vec[idx++] = filename;
        vec[idx] = NULL;
        exec_kernel_doc(vec);
@@ -306,6 +333,10 @@ static void docsect(char *filename, char *line)
                if (*s == '\n')
                        *s = '\0';
 
+       asprintf(&s, "DOC: %s", line);
+       consume_symbol(s);
+       free(s);
+
        vec[0] = KERNELDOC;
        vec[1] = DOCBOOK;
        vec[2] = FUNCTION;
@@ -315,6 +346,84 @@ static void docsect(char *filename, char *line)
        exec_kernel_doc(vec);
 }
 
+static void find_all_symbols(char *filename)
+{
+       char *vec[4]; /* kerneldoc -list file NULL */
+       pid_t pid;
+       int ret, i, count, start;
+       char real_filename[PATH_MAX + 1];
+       int pipefd[2];
+       char *data, *str;
+       size_t data_len = 0;
+
+       vec[0] = KERNELDOC;
+       vec[1] = LIST;
+       vec[2] = filename;
+       vec[3] = NULL;
+
+       if (pipe(pipefd)) {
+               perror("pipe");
+               exit(1);
+       }
+
+       switch (pid=fork()) {
+               case -1:
+                       perror("fork");
+                       exit(1);
+               case  0:
+                       close(pipefd[0]);
+                       dup2(pipefd[1], 1);
+                       memset(real_filename, 0, sizeof(real_filename));
+                       strncat(real_filename, kernsrctree, PATH_MAX);
+                       strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
+                                       PATH_MAX - strlen(real_filename));
+                       execvp(real_filename, vec);
+                       fprintf(stderr, "exec ");
+                       perror(real_filename);
+                       exit(1);
+               default:
+                       close(pipefd[1]);
+                       data = malloc(4096);
+                       do {
+                               while ((ret = read(pipefd[0],
+                                                  data + data_len,
+                                                  4096)) > 0) {
+                                       data_len += ret;
+                                       data = realloc(data, data_len + 4096);
+                               }
+                       } while (ret == -EAGAIN);
+                       if (ret != 0) {
+                               perror("read");
+                               exit(1);
+                       }
+                       waitpid(pid, &ret, 0);
+       }
+       if (WIFEXITED(ret))
+               exitstatus |= WEXITSTATUS(ret);
+       else
+               exitstatus = 0xff;
+
+       count = 0;
+       /* poor man's strtok, but with counting */
+       for (i = 0; i < data_len; i++) {
+               if (data[i] == '\n') {
+                       count++;
+                       data[i] = '\0';
+               }
+       }
+       start = all_list_len;
+       all_list_len += count;
+       all_list = realloc(all_list, sizeof(char *) * all_list_len);
+       str = data;
+       for (i = 0; i < data_len && start != all_list_len; i++) {
+               if (data[i] == '\0') {
+                       all_list[start] = str;
+                       str = data + i + 1;
+                       start++;
+               }
+       }
+}
+
 /*
  * Parse file, calling action specific functions for:
  * 1) Lines containing !E
@@ -322,7 +431,8 @@ static void docsect(char *filename, char *line)
  * 3) Lines containing !D
  * 4) Lines containing !F
  * 5) Lines containing !P
- * 6) Default lines - lines not matching the above
+ * 6) Lines containing !C
+ * 7) Default lines - lines not matching the above
  */
 static void parse_file(FILE *infile)
 {
@@ -365,6 +475,12 @@ static void parse_file(FILE *infile)
                                                s++;
                                        docsection(line + 2, s);
                                        break;
+                               case 'C':
+                                       while (*s && !isspace(*s)) s++;
+                                       *s = '\0';
+                                       if (findall)
+                                               findall(line+2);
+                                       break;
                                default:
                                        defaultline(line);
                        }
@@ -380,6 +496,7 @@ static void parse_file(FILE *infile)
 int main(int argc, char *argv[])
 {
        FILE * infile;
+       int i;
 
        srctree = getenv("SRCTREE");
        if (!srctree)
@@ -415,6 +532,7 @@ int main(int argc, char *argv[])
                symbolsonly       = find_export_symbols;
                singlefunctions   = noaction2;
                docsection        = noaction2;
+               findall           = find_all_symbols;
                parse_file(infile);
 
                /* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@ int main(int argc, char *argv[])
                symbolsonly       = printline;
                singlefunctions   = singfunc;
                docsection        = docsect;
+               findall           = NULL;
 
                parse_file(infile);
+
+               for (i = 0; i < all_list_len; i++) {
+                       if (!all_list[i])
+                               continue;
+                       fprintf(stderr, "Warning: didn't use docs for %s\n",
+                               all_list[i]);
+               }
        }
        else if (strcmp("depend", argv[1]) == 0)
        {
@@ -439,6 +565,7 @@ int main(int argc, char *argv[])
                symbolsonly       = adddep;
                singlefunctions   = adddep2;
                docsection        = adddep2;
+               findall           = adddep;
                parse_file(infile);
                printf("\n");
        }
diff --git a/scripts/basic/hash.c b/scripts/basic/hash.c
deleted file mode 100644 (file)
index 2ef5d3f..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define DYNAMIC_DEBUG_HASH_BITS 6
-
-static const char *program;
-
-static void usage(void)
-{
-       printf("Usage: %s <djb2|r5> <modname>\n", program);
-       exit(1);
-}
-
-/* djb2 hashing algorithm by Dan Bernstein. From:
- * http://www.cse.yorku.ca/~oz/hash.html
- */
-
-static unsigned int djb2_hash(char *str)
-{
-       unsigned long hash = 5381;
-       int c;
-
-       c = *str;
-       while (c) {
-               hash = ((hash << 5) + hash) + c;
-               c = *++str;
-       }
-       return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-static unsigned int r5_hash(char *str)
-{
-       unsigned long hash = 0;
-       int c;
-
-       c = *str;
-       while (c) {
-               hash = (hash + (c << 4) + (c >> 4)) * 11;
-               c = *++str;
-       }
-       return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-int main(int argc, char *argv[])
-{
-       program = argv[0];
-
-       if (argc != 3)
-               usage();
-       if (!strcmp(argv[1], "djb2"))
-               printf("%d\n", djb2_hash(argv[2]));
-       else if (!strcmp(argv[1], "r5"))
-               printf("%d\n", r5_hash(argv[2]));
-       else
-               usage();
-       exit(0);
-}
-
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
new file mode 100644 (file)
index 0000000..520d16b
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+# Test for gcc 'asm goto' support
+# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
+
+echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
index 5b7c86ea43a1e3f27484ccfc83f8d2992ad548a4..7ef429cd5cb38f9bd07889aad8f5ab8f3983e27b 100644 (file)
@@ -427,7 +427,7 @@ static void check_conf(struct menu *menu)
                                if (sym->name && !sym_is_choice_value(sym)) {
                                        printf("CONFIG_%s\n", sym->name);
                                }
-                       } else {
+                       } else if (input_mode != oldnoconfig) {
                                if (!conf_cnt++)
                                        printf(_("*\n* Restart config...\n*\n"));
                                rootEntry = menu_get_parent_menu(menu);
index 6ee2e4fb148146ace18b2e5a7414e6a89d5d3e2f..170459c224a13d94be08b00b72b63c1503303ed1 100644 (file)
@@ -165,7 +165,6 @@ struct menu {
        struct symbol *sym;
        struct property *prompt;
        struct expr *dep;
-       struct expr *dir_dep;
        unsigned int flags;
        char *help;
        struct file *file;
index 4fb590247f330bb75215fe88eb749d42da8b489e..edda8b49619d9e66ff70e16dd8ef68caf4a86bc6 100644 (file)
@@ -107,7 +107,6 @@ static struct expr *menu_check_dep(struct expr *e)
 void menu_add_dep(struct expr *dep)
 {
        current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep));
-       current_entry->dir_dep = current_entry->dep;
 }
 
 void menu_set_type(int type)
@@ -291,10 +290,6 @@ void menu_finalize(struct menu *parent)
                for (menu = parent->list; menu; menu = menu->next)
                        menu_finalize(menu);
        } else if (sym) {
-               /* ignore inherited dependencies for dir_dep */
-               sym->dir_dep.expr = expr_transform(expr_copy(parent->dir_dep));
-               sym->dir_dep.expr = expr_eliminate_dups(sym->dir_dep.expr);
-
                basedep = parent->prompt ? parent->prompt->visible.expr : NULL;
                basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no);
                basedep = expr_eliminate_dups(expr_transform(basedep));
@@ -325,6 +320,8 @@ void menu_finalize(struct menu *parent)
                        parent->next = last_menu->next;
                        last_menu->next = NULL;
                }
+
+               sym->dir_dep.expr = parent->dep;
        }
        for (menu = parent->list; menu; menu = menu->next) {
                if (sym && sym_is_choice(sym) &&
index 943712ca6c0a6a0eb5bbf97dbba2a2d20e495b47..1f8b305449db354b103c6eb7ced09095c801dc1b 100644 (file)
@@ -350,6 +350,7 @@ void sym_calc_value(struct symbol *sym)
                                }
                        }
                calc_newval:
+#if 0
                        if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) {
                                fprintf(stderr, "warning: (");
                                expr_fprint(sym->rev_dep.expr, stderr);
@@ -358,6 +359,7 @@ void sym_calc_value(struct symbol *sym)
                                expr_fprint(sym->dir_dep.expr, stderr);
                                fprintf(stderr, ")\n");
                        }
+#endif
                        newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri);
                }
                if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN)
index 102e1235fd5ced3f83f18a9a5ecd2306b31e087b..cdb6dc1f6458ba4634c0f8584ae3da615ce10696 100755 (executable)
@@ -44,12 +44,13 @@ use strict;
 # Note: This only supports 'c'.
 
 # usage:
-# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ]
+# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
 #           [ -function funcname [ -function funcname ...] ] c file(s) > outputfile
 # or
 #           [ -nofunction funcname [ -nofunction funcname ...] ] c file(s) > outputfile
 #
 #  Set output format using one of -docbook -html -text or -man.  Default is man.
+#  The -list format is for internal use by docproc.
 #
 #  -no-doc-sections
 #      Do not output DOC: sections
@@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1",
                        $type_param, "\$1" );
 my $blankline_text = "";
 
+# list mode
+my %highlights_list = ( $type_constant, "\$1",
+                       $type_func, "\$1",
+                       $type_struct, "\$1",
+                       $type_param, "\$1" );
+my $blankline_list = "";
 
 sub usage {
-    print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n";
+    print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
+    print "         [ -no-doc-sections ]\n";
     print "         [ -function funcname [ -function funcname ...] ]\n";
     print "         [ -nofunction funcname [ -nofunction funcname ...] ]\n";
     print "         c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) {
        $output_mode = "xml";
        %highlights = %highlights_xml;
        $blankline = $blankline_xml;
+    } elsif ($cmd eq "-list") {
+       $output_mode = "list";
+       %highlights = %highlights_list;
+       $blankline = $blankline_list;
     } elsif ($cmd eq "-gnome") {
        $output_mode = "gnome";
        %highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) {
     }
 }
 
+## list mode output functions
+
+sub output_function_list(%) {
+    my %args = %{$_[0]};
+
+    print $args{'function'} . "\n";
+}
+
+# output enum in list
+sub output_enum_list(%) {
+    my %args = %{$_[0]};
+    print $args{'enum'} . "\n";
+}
+
+# output typedef in list
+sub output_typedef_list(%) {
+    my %args = %{$_[0]};
+    print $args{'typedef'} . "\n";
+}
+
+# output struct as list
+sub output_struct_list(%) {
+    my %args = %{$_[0]};
+
+    print $args{'struct'} . "\n";
+}
+
+sub output_blockhead_list(%) {
+    my %args = %{$_[0]};
+    my ($parameter, $section);
+
+    foreach $section (@{$args{'sectionlist'}}) {
+       print "DOC: $section\n";
+    }
+}
+
 ##
 # generic output function for all types (function, struct/union, typedef, enum);
 # calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) {
                foreach $px (0 .. $#prms) {
                        $prm_clean = $prms[$px];
                        $prm_clean =~ s/\[.*\]//;
-                       $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//;
+                       $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
                        # ignore array size in a parameter string;
                        # however, the original param string may contain
                        # spaces, e.g.:  addr[6 + 2]
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
new file mode 100644 (file)
index 0000000..26e1271
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * recordmcount.c: construct a table of the locations of calls to 'mcount'
+ * so that ftrace can find them quickly.
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ *
+ * Restructured to fit Linux format, as well as other updates:
+ *  Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ */
+
+/*
+ * Strategy: alter the .o file in-place.
+ *
+ * Append a new STRTAB that has the new section names, followed by a new array
+ * ElfXX_Shdr[] that has the new section headers, followed by the section
+ * contents for __mcount_loc and its relocations.  The old shstrtab strings,
+ * and the old ElfXX_Shdr[] array, remain as "garbage" (commonly, a couple
+ * kilobytes.)  Subsequent processing by /bin/ld (or the kernel module loader)
+ * will ignore the garbage regions, because they are not designated by the
+ * new .e_shoff nor the new ElfXX_Shdr[].  [To remove the garbage,
+ * use "ld -r" to create a new file that omits the garbage.]
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int fd_map;     /* File descriptor for file being modified. */
+static int mmap_failed; /* Boolean flag. */
+static void *ehdr_curr; /* current ElfXX_Ehdr *  for resource cleanup */
+static char gpfx;      /* prefix for global symbol name (sometimes '_') */
+static struct stat sb; /* Remember .st_size, etc. */
+static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
+
+/* setjmp() return values */
+enum {
+       SJ_SETJMP = 0,  /* hardwired first return */
+       SJ_FAIL,
+       SJ_SUCCEED
+};
+
+/* Per-file resource cleanup when multiple files. */
+static void
+cleanup(void)
+{
+       if (!mmap_failed)
+               munmap(ehdr_curr, sb.st_size);
+       else
+               free(ehdr_curr);
+       close(fd_map);
+}
+
+static void __attribute__((noreturn))
+fail_file(void)
+{
+       cleanup();
+       longjmp(jmpenv, SJ_FAIL);
+}
+
+static void __attribute__((noreturn))
+succeed_file(void)
+{
+       cleanup();
+       longjmp(jmpenv, SJ_SUCCEED);
+}
+
+/* ulseek, uread, ...:  Check return value for errors. */
+
+static off_t
+ulseek(int const fd, off_t const offset, int const whence)
+{
+       off_t const w = lseek(fd, offset, whence);
+       if ((off_t)-1 == w) {
+               perror("lseek");
+               fail_file();
+       }
+       return w;
+}
+
+static size_t
+uread(int const fd, void *const buf, size_t const count)
+{
+       size_t const n = read(fd, buf, count);
+       if (n != count) {
+               perror("read");
+               fail_file();
+       }
+       return n;
+}
+
+static size_t
+uwrite(int const fd, void const *const buf, size_t const count)
+{
+       size_t const n = write(fd, buf, count);
+       if (n != count) {
+               perror("write");
+               fail_file();
+       }
+       return n;
+}
+
+static void *
+umalloc(size_t size)
+{
+       void *const addr = malloc(size);
+       if (0 == addr) {
+               fprintf(stderr, "malloc failed: %zu bytes\n", size);
+               fail_file();
+       }
+       return addr;
+}
+
+/*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces.  If successful, then mmap
+ * avoids copying unused pieces; else just read the whole file.
+ * Open for both read and write; new info will be appended to the file.
+ * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
+ * do not propagate to the file until an explicit overwrite at the end.
+ * This preserves most aspects of consistency (all except .st_size)
+ * for simultaneous readers of the file while we are appending to it.
+ * However, multiple writers still are bad.  We choose not to use
+ * locking because it is expensive and the use case of kernel build
+ * makes multiple writers unlikely.
+ */
+static void *mmap_file(char const *fname)
+{
+       void *addr;
+
+       fd_map = open(fname, O_RDWR);
+       if (0 > fd_map || 0 > fstat(fd_map, &sb)) {
+               perror(fname);
+               fail_file();
+       }
+       if (!S_ISREG(sb.st_mode)) {
+               fprintf(stderr, "not a regular file: %s\n", fname);
+               fail_file();
+       }
+       addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+                   fd_map, 0);
+       mmap_failed = 0;
+       if (MAP_FAILED == addr) {
+               mmap_failed = 1;
+               addr = umalloc(sb.st_size);
+               uread(fd_map, addr, sb.st_size);
+       }
+       return addr;
+}
+
+/* w8rev, w8nat, ...: Handle endianness. */
+
+static uint64_t w8rev(uint64_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (7 * 8))
+              | ((0xff & (x >> (1 * 8))) << (6 * 8))
+              | ((0xff & (x >> (2 * 8))) << (5 * 8))
+              | ((0xff & (x >> (3 * 8))) << (4 * 8))
+              | ((0xff & (x >> (4 * 8))) << (3 * 8))
+              | ((0xff & (x >> (5 * 8))) << (2 * 8))
+              | ((0xff & (x >> (6 * 8))) << (1 * 8))
+              | ((0xff & (x >> (7 * 8))) << (0 * 8));
+}
+
+static uint32_t w4rev(uint32_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (3 * 8))
+              | ((0xff & (x >> (1 * 8))) << (2 * 8))
+              | ((0xff & (x >> (2 * 8))) << (1 * 8))
+              | ((0xff & (x >> (3 * 8))) << (0 * 8));
+}
+
+static uint32_t w2rev(uint16_t const x)
+{
+       return   ((0xff & (x >> (0 * 8))) << (1 * 8))
+              | ((0xff & (x >> (1 * 8))) << (0 * 8));
+}
+
+static uint64_t w8nat(uint64_t const x)
+{
+       return x;
+}
+
+static uint32_t w4nat(uint32_t const x)
+{
+       return x;
+}
+
+static uint32_t w2nat(uint16_t const x)
+{
+       return x;
+}
+
+static uint64_t (*w8)(uint64_t);
+static uint32_t (*w)(uint32_t);
+static uint32_t (*w2)(uint16_t);
+
+/* Names of the sections that could contain calls to mcount. */
+static int
+is_mcounted_section_name(char const *const txtname)
+{
+       return 0 == strcmp(".text",          txtname) ||
+               0 == strcmp(".sched.text",    txtname) ||
+               0 == strcmp(".spinlock.text", txtname) ||
+               0 == strcmp(".irqentry.text", txtname) ||
+               0 == strcmp(".text.unlikely", txtname);
+}
+
+/* 32 bit and 64 bit are very similar */
+#include "recordmcount.h"
+#define RECORD_MCOUNT_64
+#include "recordmcount.h"
+
+static void
+do_file(char const *const fname)
+{
+       Elf32_Ehdr *const ehdr = mmap_file(fname);
+       unsigned int reltype = 0;
+
+       ehdr_curr = ehdr;
+       w = w4nat;
+       w2 = w2nat;
+       w8 = w8nat;
+       switch (ehdr->e_ident[EI_DATA]) {
+               static unsigned int const endian = 1;
+       default: {
+               fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
+                       ehdr->e_ident[EI_DATA], fname);
+               fail_file();
+       } break;
+       case ELFDATA2LSB: {
+               if (1 != *(unsigned char const *)&endian) {
+                       /* main() is big endian, file.o is little endian. */
+                       w = w4rev;
+                       w2 = w2rev;
+                       w8 = w8rev;
+               }
+       } break;
+       case ELFDATA2MSB: {
+               if (0 != *(unsigned char const *)&endian) {
+                       /* main() is little endian, file.o is big endian. */
+                       w = w4rev;
+                       w2 = w2rev;
+                       w8 = w8rev;
+               }
+       } break;
+       }  /* end switch */
+       if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG)
+       ||  ET_REL != w2(ehdr->e_type)
+       ||  EV_CURRENT != ehdr->e_ident[EI_VERSION]) {
+               fprintf(stderr, "unrecognized ET_REL file %s\n", fname);
+               fail_file();
+       }
+
+       gpfx = 0;
+       switch (w2(ehdr->e_machine)) {
+       default: {
+               fprintf(stderr, "unrecognized e_machine %d %s\n",
+                       w2(ehdr->e_machine), fname);
+               fail_file();
+       } break;
+       case EM_386:     reltype = R_386_32;                   break;
+       case EM_ARM:     reltype = R_ARM_ABS32;                break;
+       case EM_IA_64:   reltype = R_IA64_IMM64;   gpfx = '_'; break;
+       case EM_PPC:     reltype = R_PPC_ADDR32;   gpfx = '_'; break;
+       case EM_PPC64:   reltype = R_PPC64_ADDR64; gpfx = '_'; break;
+       case EM_S390:    /* reltype: e_class    */ gpfx = '_'; break;
+       case EM_SH:      reltype = R_SH_DIR32;                 break;
+       case EM_SPARCV9: reltype = R_SPARC_64;     gpfx = '_'; break;
+       case EM_X86_64:  reltype = R_X86_64_64;                break;
+       }  /* end switch */
+
+       switch (ehdr->e_ident[EI_CLASS]) {
+       default: {
+               fprintf(stderr, "unrecognized ELF class %d %s\n",
+                       ehdr->e_ident[EI_CLASS], fname);
+               fail_file();
+       } break;
+       case ELFCLASS32: {
+               if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize)
+               ||  sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) {
+                       fprintf(stderr,
+                               "unrecognized ET_REL file: %s\n", fname);
+                       fail_file();
+               }
+               if (EM_S390 == w2(ehdr->e_machine))
+                       reltype = R_390_32;
+               do32(ehdr, fname, reltype);
+       } break;
+       case ELFCLASS64: {
+               Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
+               if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize)
+               ||  sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) {
+                       fprintf(stderr,
+                               "unrecognized ET_REL file: %s\n", fname);
+                       fail_file();
+               }
+               if (EM_S390 == w2(ghdr->e_machine))
+                       reltype = R_390_64;
+               do64(ghdr, fname, reltype);
+       } break;
+       }  /* end switch */
+
+       cleanup();
+}
+
+int
+main(int argc, char const *argv[])
+{
+       const char ftrace[] = "kernel/trace/ftrace.o";
+       int ftrace_size = sizeof(ftrace) - 1;
+       int n_error = 0;  /* gcc-4.3.0 false positive complaint */
+
+       if (argc <= 1) {
+               fprintf(stderr, "usage: recordmcount file.o...\n");
+               return 0;
+       }
+
+       /* Process each file in turn, allowing deep failure. */
+       for (--argc, ++argv; 0 < argc; --argc, ++argv) {
+               int const sjval = setjmp(jmpenv);
+               int len;
+
+               /*
+                * The file kernel/trace/ftrace.o references the mcount
+                * function but does not call it. Since ftrace.o should
+                * not be traced anyway, we just skip it.
+                */
+               len = strlen(argv[0]);
+               if (len >= ftrace_size &&
+                   strcmp(argv[0] + (len - ftrace_size), ftrace) == 0)
+                       continue;
+
+               switch (sjval) {
+               default: {
+                       fprintf(stderr, "internal error: %s\n", argv[0]);
+                       exit(1);
+               } break;
+               case SJ_SETJMP: {  /* normal sequence */
+                       /* Avoid problems if early cleanup() */
+                       fd_map = -1;
+                       ehdr_curr = NULL;
+                       mmap_failed = 1;
+                       do_file(argv[0]);
+               } break;
+               case SJ_FAIL: {  /* error in do_file or below */
+                       ++n_error;
+               } break;
+               case SJ_SUCCEED: {  /* premature success */
+                       /* do nothing */
+               } break;
+               }  /* end switch */
+       }
+       return !!n_error;
+}
+
+
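One structural idiom in this new tool deserves a note: per-file error handling is done with a setjmp()/longjmp() escape, so helpers nested arbitrarily deep (ulseek, uread, find_secsym_ndx, ...) can abort the current file via fail_file() without threading error returns through every caller. A runnable miniature, with parse() standing in for do_file():

#include <setjmp.h>

static jmp_buf jmpenv;

enum { SJ_SETJMP = 0, SJ_FAIL, SJ_SUCCEED };

static void parse(const char *name)	/* stand-in for do_file() */
{
	if (!name[0])
		longjmp(jmpenv, SJ_FAIL);	/* deep failure, no return codes */
	longjmp(jmpenv, SJ_SUCCEED);
}

int main(int argc, char *argv[])
{
	int n_error = 0;

	for (--argc, ++argv; argc > 0; --argc, ++argv) {
		switch (setjmp(jmpenv)) {
		case SJ_SETJMP:		/* hardwired first return: do the work */
			parse(argv[0]);
			break;
		case SJ_FAIL:
			++n_error;
			break;
		case SJ_SUCCEED:
			break;
		}
	}
	return !!n_error;
}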
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
new file mode 100644 (file)
index 0000000..7f39d09
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * recordmcount.h
+ *
+ * This code was taken out of recordmcount.c written by
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.  All rights reserved.
+ *
+ * The original code had the same algorithms for both 32bit
+ * and 64bit ELF files, but the code was duplicated to support
+ * the difference in structures that were used. This
+ * file creates a macro of everything that is different between
+ * the 64 and 32 bit code, such that both sets of functions can
+ * be created by including this header twice: once with
+ * RECORD_MCOUNT_64 undefined, and again with
+ * it defined.
+ *
+ * This conversion to macros was done by:
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ *
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ */
+#undef append_func
+#undef sift_rel_mcount
+#undef find_secsym_ndx
+#undef __has_rel_mcount
+#undef has_rel_mcount
+#undef tot_relsize
+#undef do_func
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Rel
+#undef Elf_Rela
+#undef Elf_Sym
+#undef ELF_R_SYM
+#undef ELF_R_INFO
+#undef ELF_ST_BIND
+#undef uint_t
+#undef _w
+#undef _align
+#undef _size
+
+#ifdef RECORD_MCOUNT_64
+# define append_func           append64
+# define sift_rel_mcount       sift64_rel_mcount
+# define find_secsym_ndx       find64_secsym_ndx
+# define __has_rel_mcount      __has64_rel_mcount
+# define has_rel_mcount                has64_rel_mcount
+# define tot_relsize           tot64_relsize
+# define do_func               do64
+# define Elf_Ehdr              Elf64_Ehdr
+# define Elf_Shdr              Elf64_Shdr
+# define Elf_Rel               Elf64_Rel
+# define Elf_Rela              Elf64_Rela
+# define Elf_Sym               Elf64_Sym
+# define ELF_R_SYM             ELF64_R_SYM
+# define ELF_R_INFO            ELF64_R_INFO
+# define ELF_ST_BIND           ELF64_ST_BIND
+# define uint_t                        uint64_t
+# define _w                    w8
+# define _align                        7u
+# define _size                 8
+#else
+# define append_func           append32
+# define sift_rel_mcount       sift32_rel_mcount
+# define find_secsym_ndx       find32_secsym_ndx
+# define __has_rel_mcount      __has32_rel_mcount
+# define has_rel_mcount                has32_rel_mcount
+# define tot_relsize           tot32_relsize
+# define do_func               do32
+# define Elf_Ehdr              Elf32_Ehdr
+# define Elf_Shdr              Elf32_Shdr
+# define Elf_Rel               Elf32_Rel
+# define Elf_Rela              Elf32_Rela
+# define Elf_Sym               Elf32_Sym
+# define ELF_R_SYM             ELF32_R_SYM
+# define ELF_R_INFO            ELF32_R_INFO
+# define ELF_ST_BIND           ELF32_ST_BIND
+# define uint_t                        uint32_t
+# define _w                    w
+# define _align                        3u
+# define _size                 4
+#endif
+
+/* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
+static void append_func(Elf_Ehdr *const ehdr,
+                       Elf_Shdr *const shstr,
+                       uint_t const *const mloc0,
+                       uint_t const *const mlocp,
+                       Elf_Rel const *const mrel0,
+                       Elf_Rel const *const mrelp,
+                       unsigned int const rel_entsize,
+                       unsigned int const symsec_sh_link)
+{
+       /* Begin constructing output file */
+       Elf_Shdr mcsec;
+       char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
+               ? ".rela__mcount_loc"
+               :  ".rel__mcount_loc";
+       unsigned const old_shnum = w2(ehdr->e_shnum);
+       uint_t const old_shoff = _w(ehdr->e_shoff);
+       uint_t const old_shstr_sh_size   = _w(shstr->sh_size);
+       uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
+       uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
+       uint_t new_e_shoff;
+
+       shstr->sh_size = _w(t);
+       shstr->sh_offset = _w(sb.st_size);
+       t += sb.st_size;
+       t += (_align & -t);  /* word-byte align */
+       new_e_shoff = t;
+
+       /* body for new shstrtab */
+       ulseek(fd_map, sb.st_size, SEEK_SET);
+       uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size);
+       uwrite(fd_map, mc_name, 1 + strlen(mc_name));
+
+       /* old(modified) Elf_Shdr table, word-byte aligned */
+       ulseek(fd_map, t, SEEK_SET);
+       t += sizeof(Elf_Shdr) * old_shnum;
+       uwrite(fd_map, old_shoff + (void *)ehdr,
+              sizeof(Elf_Shdr) * old_shnum);
+
+       /* new sections __mcount_loc and .rel__mcount_loc */
+       t += 2*sizeof(mcsec);
+       mcsec.sh_name = w((sizeof(Elf_Rela) == rel_entsize) + strlen(".rel")
+               + old_shstr_sh_size);
+       mcsec.sh_type = w(SHT_PROGBITS);
+       mcsec.sh_flags = _w(SHF_ALLOC);
+       mcsec.sh_addr = 0;
+       mcsec.sh_offset = _w(t);
+       mcsec.sh_size = _w((void *)mlocp - (void *)mloc0);
+       mcsec.sh_link = 0;
+       mcsec.sh_info = 0;
+       mcsec.sh_addralign = _w(_size);
+       mcsec.sh_entsize = _w(_size);
+       uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+       mcsec.sh_name = w(old_shstr_sh_size);
+       mcsec.sh_type = (sizeof(Elf_Rela) == rel_entsize)
+               ? w(SHT_RELA)
+               : w(SHT_REL);
+       mcsec.sh_flags = 0;
+       mcsec.sh_addr = 0;
+       mcsec.sh_offset = _w((void *)mlocp - (void *)mloc0 + t);
+       mcsec.sh_size   = _w((void *)mrelp - (void *)mrel0);
+       mcsec.sh_link = w(symsec_sh_link);
+       mcsec.sh_info = w(old_shnum);
+       mcsec.sh_addralign = _w(_size);
+       mcsec.sh_entsize = _w(rel_entsize);
+       uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+       uwrite(fd_map, mloc0, (void *)mlocp - (void *)mloc0);
+       uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0);
+
+       ehdr->e_shoff = _w(new_e_shoff);
+       ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum));  /* {.rel,}__mcount_loc */
+       ulseek(fd_map, 0, SEEK_SET);
+       uwrite(fd_map, ehdr, sizeof(*ehdr));
+}
+
+
+/*
+ * Look at the relocations in order to find the calls to mcount.
+ * Accumulate the section offsets that are found, and their relocation info,
+ * onto the end of the existing arrays.
+ */
+static uint_t *sift_rel_mcount(uint_t *mlocp,
+                              unsigned const offbase,
+                              Elf_Rel **const mrelpp,
+                              Elf_Shdr const *const relhdr,
+                              Elf_Ehdr const *const ehdr,
+                              unsigned const recsym,
+                              uint_t const recval,
+                              unsigned const reltype)
+{
+       uint_t *const mloc0 = mlocp;
+       Elf_Rel *mrelp = *mrelpp;
+       Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+               + (void *)ehdr);
+       unsigned const symsec_sh_link = w(relhdr->sh_link);
+       Elf_Shdr const *const symsec = &shdr0[symsec_sh_link];
+       Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)];
+       char const *const str0 = (char const *)(_w(strsec->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset)
+               + (void *)ehdr);
+       unsigned rel_entsize = _w(relhdr->sh_entsize);
+       unsigned const nrel = _w(relhdr->sh_size) / rel_entsize;
+       Elf_Rel const *relp = rel0;
+
+       unsigned mcountsym = 0;
+       unsigned t;
+
+       for (t = nrel; t; --t) {
+               if (!mcountsym) {
+                       Elf_Sym const *const symp =
+                               &sym0[ELF_R_SYM(_w(relp->r_info))];
+                       char const *symname = &str0[w(symp->st_name)];
+
+                       if ('.' == symname[0])
+                               ++symname;  /* ppc64 hack */
+                       if (0 == strcmp((('_' == gpfx) ? "_mcount" : "mcount"),
+                                       symname))
+                               mcountsym = ELF_R_SYM(_w(relp->r_info));
+               }
+
+               if (mcountsym == ELF_R_SYM(_w(relp->r_info))) {
+                       uint_t const addend = _w(_w(relp->r_offset) - recval);
+
+                       mrelp->r_offset = _w(offbase
+                               + ((void *)mlocp - (void *)mloc0));
+                       mrelp->r_info = _w(ELF_R_INFO(recsym, reltype));
+                       if (sizeof(Elf_Rela) == rel_entsize) {
+                               ((Elf_Rela *)mrelp)->r_addend = addend;
+                               *mlocp++ = 0;
+                       } else
+                               *mlocp++ = addend;
+
+                       mrelp = (Elf_Rel *)(rel_entsize + (void *)mrelp);
+               }
+               relp = (Elf_Rel const *)(rel_entsize + (void *)relp);
+       }
+       *mrelpp = mrelp;
+       return mlocp;
+}
+
+
+/*
+ * Find a symbol in the given section, to be used as the base for relocating
+ * the table of offsets of calls to mcount.  A local or global symbol suffices,
+ * but avoid a Weak symbol because it may be overridden; the change in value
+ * would invalidate the relocations of the offsets of the calls to mcount.
+ * Often the found symbol will be the unnamed local symbol generated by
+ * GNU 'as' for the start of each section.  For example:
+ *    Num:    Value  Size Type    Bind   Vis      Ndx Name
+ *      2: 00000000     0 SECTION LOCAL  DEFAULT    1
+ */
+static unsigned find_secsym_ndx(unsigned const txtndx,
+                               char const *const txtname,
+                               uint_t *const recvalp,
+                               Elf_Shdr const *const symhdr,
+                               Elf_Ehdr const *const ehdr)
+{
+       Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
+               + (void *)ehdr);
+       unsigned const nsym = _w(symhdr->sh_size) / _w(symhdr->sh_entsize);
+       Elf_Sym const *symp;
+       unsigned t;
+
+       for (symp = sym0, t = nsym; t; --t, ++symp) {
+               unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
+
+               if (txtndx == w2(symp->st_shndx)
+                       /* avoid STB_WEAK */
+                   && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+                       *recvalp = _w(symp->st_value);
+                       return symp - sym0;
+               }
+       }
+       fprintf(stderr, "Cannot find symbol for section %d: %s.\n",
+               txtndx, txtname);
+       fail_file();
+}
+
+
+/* Evade ISO C restriction: no declaration after statement in has_rel_mcount. */
+static char const *
+__has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
+                Elf_Shdr const *const shdr0,
+                char const *const shstrtab,
+                char const *const fname)
+{
+       /* .sh_info depends on .sh_type == SHT_REL[,A] */
+       Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)];
+       char const *const txtname = &shstrtab[w(txthdr->sh_name)];
+
+       if (0 == strcmp("__mcount_loc", txtname)) {
+               fprintf(stderr, "warning: __mcount_loc already exists: %s\n",
+                       fname);
+               succeed_file();
+       }
+       if (SHT_PROGBITS != w(txthdr->sh_type) ||
+           !is_mcounted_section_name(txtname))
+               return NULL;
+       return txtname;
+}
+
+static char const *has_rel_mcount(Elf_Shdr const *const relhdr,
+                                 Elf_Shdr const *const shdr0,
+                                 char const *const shstrtab,
+                                 char const *const fname)
+{
+       if (SHT_REL  != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type))
+               return NULL;
+       return __has_rel_mcount(relhdr, shdr0, shstrtab, fname);
+}
+
+
+static unsigned tot_relsize(Elf_Shdr const *const shdr0,
+                           unsigned nhdr,
+                           const char *const shstrtab,
+                           const char *const fname)
+{
+       unsigned totrelsz = 0;
+       Elf_Shdr const *shdrp = shdr0;
+
+       for (; nhdr; --nhdr, ++shdrp) {
+               if (has_rel_mcount(shdrp, shdr0, shstrtab, fname))
+                       totrelsz += _w(shdrp->sh_size);
+       }
+       return totrelsz;
+}
+
+
+/* Overall supervision for Elf32 ET_REL file. */
+static void
+do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype)
+{
+       Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+               + (void *)ehdr);
+       unsigned const nhdr = w2(ehdr->e_shnum);
+       Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
+       char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
+               + (void *)ehdr);
+
+       Elf_Shdr const *relhdr;
+       unsigned k;
+
+       /* Upper bound on space: assume all relevant relocs are for mcount. */
+       unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname);
+       Elf_Rel *const mrel0 = umalloc(totrelsz);
+       Elf_Rel *      mrelp = mrel0;
+
+       /* 2*sizeof(address) <= sizeof(Elf_Rel) */
+       uint_t *const mloc0 = umalloc(totrelsz>>1);
+       uint_t *      mlocp = mloc0;
+
+       unsigned rel_entsize = 0;
+       unsigned symsec_sh_link = 0;
+
+       for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+               char const *const txtname = has_rel_mcount(relhdr, shdr0,
+                       shstrtab, fname);
+               if (txtname) {
+                       uint_t recval = 0;
+                       unsigned const recsym = find_secsym_ndx(
+                               w(relhdr->sh_info), txtname, &recval,
+                               &shdr0[symsec_sh_link = w(relhdr->sh_link)],
+                               ehdr);
+
+                       rel_entsize = _w(relhdr->sh_entsize);
+                       mlocp = sift_rel_mcount(mlocp,
+                               (void *)mlocp - (void *)mloc0, &mrelp,
+                               relhdr, ehdr, recsym, recval, reltype);
+               }
+       }
+       if (mloc0 != mlocp) {
+               append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp,
+                           rel_entsize, symsec_sh_link);
+       }
+       free(mrel0);
+       free(mloc0);
+}
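
The w(), w2() and _w() accessors used above are endianness helpers defined elsewhere in this tool; they return file fields in host byte order regardless of the object file's byte order. A minimal sketch of what such helpers can look like (hypothetical names and selection mechanism, assuming the flag is derived from ehdr->e_ident[EI_DATA]):

    #include <stdint.h>

    static int file_big_endian;   /* hypothetical: set from ehdr->e_ident[EI_DATA] */

    /* Return a 16-bit file field in host order (little-endian host assumed). */
    static uint16_t w2(uint16_t x)
    {
        if (!file_big_endian)
            return x;
        return (uint16_t)((x >> 8) | (x << 8));
    }

    /* Return a 32-bit file field in host order. */
    static uint32_t w(uint32_t x)
    {
        if (!file_big_endian)
            return x;
        return ((x >> 24) & 0x000000ff) | ((x >> 8) & 0x0000ff00)
             | ((x << 8) & 0x00ff0000) | ((x << 24) & 0xff000000);
    }

_w() would do the same for the ELF-class-dependent word size (u32 for ELF32, u64 for ELF64).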
index 0a0a99f3b08331f78f7499c5e964929af20e96c9..4d995aeaebc0ad0a3232faebead627da4ebb40b9 100644 (file)
@@ -3,3 +3,4 @@
 #
 af_names.h
 capability_names.h
+rlim_names.h
index 7320331b44aba5bd52eac6b2a97302ad21a4ca5f..544ff5837cb640623dadd738fe8879d823574eda 100644 (file)
@@ -29,7 +29,7 @@
  * aa_simple_write_to_buffer - common routine for getting policy from user
  * @op: operation doing the user buffer copy
  * @userbuf: user buffer to copy data from  (NOT NULL)
- * @alloc_size: size of user buffer
+ * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
  * @copy_size: size of data to copy from user buffer
  * @pos: position write is at in the file (NOT NULL)
  *
@@ -42,6 +42,8 @@ static char *aa_simple_write_to_buffer(int op, const char __user *userbuf,
 {
        char *data;
 
+       BUG_ON(copy_size > alloc_size);
+
        if (*pos != 0)
                /* only writes from pos 0, that is complete writes */
                return ERR_PTR(-ESPIPE);
index 3c88be94649408b412c0193f0bf4186273c6a972..02baec732bb512c77fed7d5ede247b72d788a147 100644 (file)
@@ -33,8 +33,8 @@ struct aa_rlimit {
 };
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+                     unsigned int resource, struct rlimit *new_rlim);
 
 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
 
index 6e85cdb4303f69fc1911c94aefd5f1c5b695239c..506d2baf614797624fc4b9450c0d12c9f56e8ae4 100644 (file)
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
+               *ns_name = skip_spaces(&name[1]);
                if (split) {
                        /* overwrite ':' with \0 */
                        *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
                } else
                        /* a ns name without a following profile is allowed */
                        name = NULL;
-               *ns_name = &name[1];
        }
        if (name && *name == 0)
                name = NULL;
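
The reordering above matters: the old code stored *ns_name = &name[1] after name could already have been set to NULL (a namespace with no trailing profile), and it never skipped leading whitespace in the namespace component. A userspace rendering of the fixed logic, assuming skip_spaces() behaves like the kernel helper (advance past leading whitespace):

    #include <ctype.h>
    #include <stddef.h>
    #include <string.h>

    static char *skip_spaces(char *s)
    {
        while (isspace((unsigned char)*s))
            s++;
        return s;
    }

    /* ":ns:profile" -> *ns_name = "ns", returns "profile"
     * ":ns"         -> *ns_name = "ns", returns NULL
     * "profile"     -> *ns_name = NULL, returns "profile" */
    static char *split_fqname(char *fqname, char **ns_name)
    {
        char *name = skip_spaces(fqname);

        *ns_name = NULL;
        if (name[0] == ':') {
            char *split = strchr(&name[1], ':');
            /* record the namespace before name can be set to NULL */
            *ns_name = skip_spaces(&name[1]);
            if (split) {
                *split = 0;                /* overwrite ':' with \0 */
                name = skip_spaces(split + 1);
            } else {
                name = NULL;               /* ns name without a profile */
            }
        }
        if (name && *name == 0)
            name = NULL;
        return name;
    }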
index f73e2c2042185fff2d079dbdb3f4b8b828371e72..cf1de4462ccd3fb297f48bf351dd3494804f22c1 100644 (file)
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        int error = 0;
 
        if (!unconfined(profile))
-               error = aa_task_setrlimit(profile, resource, new_rlim);
+               error = aa_task_setrlimit(profile, task, resource, new_rlim);
 
        return error;
 }
index 19358dc14605bae1422ae00226291751695ba44c..82396050f18646ac0519352321637e930c05e367 100644 (file)
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
 {
        struct path root, tmp;
        char *res;
-       int deleted, connected;
-       int error = 0;
+       int connected, error = 0;
 
        /* Get the root we want to resolve to, released below */
        if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
        spin_lock(&dcache_lock);
-       /* There is a race window between path lookup here and the
-        * need to strip the " (deleted) string that __d_path applies
-        * Detect the race and relookup the path
-        *
-        * The stripping of (deleted) is a hack that could be removed
-        * with an updated __d_path
-        */
-       do {
-               tmp = root;
-               deleted = d_unlinked(path->dentry);
-               res = __d_path(path, &tmp, buf, buflen);
-
-       } while (deleted != d_unlinked(path->dentry));
+       tmp = root;
+       res = __d_path(path, &tmp, buf, buflen);
        spin_unlock(&dcache_lock);
 
        *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
-       if (deleted) {
-               /* On some filesystems, newly allocated dentries appear to the
-                * security_path hooks as a deleted dentry except without an
-                * inode allocated.
-                *
-                * Remove the appended deleted text and return as string for
-                * normal mediation, or auditing.  The (deleted) string is
-                * guaranteed to be added in this case, so just strip it.
-                */
-               buf[buflen - 11] = 0;   /* - (len(" (deleted)") +\0) */
 
-               if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+       /* Handle two cases:
+        * 1. A deleted dentry when the profile does not allow mediation of deleted
+        * 2. On some filesystems, newly allocated dentries appear to the
+        *    security_path hooks as a deleted dentry except without an inode
+        *    allocated.
+        */
+       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+           !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
-               }
        }
 
        /* Determine if the path is connected to the expected root */
index 3cdc1ad0787ec9c4769455f8aeb004a417d246bf..52cc865f1464574e696fd28eca6a6e0eed326d68 100644 (file)
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                /* released below */
                ns = aa_get_namespace(root);
 
-       write_lock(&ns->lock);
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
+               write_lock(&ns->parent->lock);
                __remove_namespace(ns);
+               write_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
+               write_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
+               write_unlock(&ns->lock);
        }
-       write_unlock(&ns->lock);
 
        /* don't fail removal if audit fails */
        (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
index 4a368f1fd36ddf02af7204d30ee1b136f1d57bf7..a4136c10b1c6292edbdadae7285803583fb74241 100644 (file)
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
 /**
  * aa_task_setrlimit - test permission to set an rlimit
  * @profile - profile confining the task  (NOT NULL)
+ * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
  *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+                     unsigned int resource, struct rlimit *new_rlim)
 {
        int error = 0;
 
-       if (profile->rlimits.mask & (1 << resource) &&
-           new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
-
-               error = audit_resource(profile, resource, new_rlim->rlim_max,
-                       -EACCES);
+       /* TODO: extend resource control to handle other (non-current)
+        * processes.  AppArmor rules currently have the implicit assumption
+        * that the task is setting the resource of the current process.
+        */
+       if ((task != current->group_leader) ||
+           (profile->rlimits.mask & (1 << resource) &&
+            new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+               error = -EACCES;
 
-       return error;
+       return audit_resource(profile, resource, new_rlim->rlim_max, error);
 }
 
 /**
index 95a6599a37bb3ae0737779d16d9a0d811d5bfc82..30ae00fbecd591591acb55c1431d62a1bbbac427 100644 (file)
@@ -677,7 +677,18 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
 {
 }
 
+static int cap_secmark_relabel_packet(u32 secid)
+{
+       return 0;
+}
 
+static void cap_secmark_refcount_inc(void)
+{
+}
+
+static void cap_secmark_refcount_dec(void)
+{
+}
 
 static void cap_req_classify_flow(const struct request_sock *req,
                                  struct flowi *fl)
@@ -777,7 +788,8 @@ static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 
 static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
-       return -EOPNOTSUPP;
+       *secid = 0;
+       return 0;
 }
 
 static void cap_release_secctx(char *secdata, u32 seclen)
@@ -1018,6 +1030,9 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, inet_conn_request);
        set_to_cap_if_null(ops, inet_csk_clone);
        set_to_cap_if_null(ops, inet_conn_established);
+       set_to_cap_if_null(ops, secmark_relabel_packet);
+       set_to_cap_if_null(ops, secmark_refcount_inc);
+       set_to_cap_if_null(ops, secmark_refcount_dec);
        set_to_cap_if_null(ops, req_classify_flow);
        set_to_cap_if_null(ops, tun_dev_create);
        set_to_cap_if_null(ops, tun_dev_post_create);
index 9d172e6e330c9fd7906a8a2e5754713f80dfb433..5e632b4857e443d8031eaa17c0e2bd7e877b3d14 100644 (file)
@@ -719,14 +719,11 @@ static int cap_safe_nice(struct task_struct *p)
 /**
 * cap_task_setscheduler - Determine if scheduler policy change is permitted
  * @p: The task to affect
- * @policy: The policy to effect
- * @lp: The parameters to the scheduling policy
  *
 * Determine if the requested scheduler policy change is permitted for the
  * specified task, returning 0 if permission is granted, -ve if denied.
  */
-int cap_task_setscheduler(struct task_struct *p, int policy,
-                          struct sched_param *lp)
+int cap_task_setscheduler(struct task_struct *p)
 {
        return cap_safe_nice(p);
 }
index 16d100d3fc38de931de8e1a2679bc025359f3d31..3fbcd1dda0ef6e06da4a5b4b9c23a240a6378a35 100644 (file)
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
 
 /* set during initialization */
+extern int iint_initialized;
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
index 7625b85c2274f457fc0d260a21e2d9039758d12c..afba4aef812f699134f7c7bc66c32251d2f12c69 100644 (file)
 
 RADIX_TREE(ima_iint_store, GFP_ATOMIC);
 DEFINE_SPINLOCK(ima_iint_lock);
-
 static struct kmem_cache *iint_cache __read_mostly;
 
+int iint_initialized = 0;
+
 /* ima_iint_find_get - return the iint associated with an inode
  *
  * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
        iint_cache =
            kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                              SLAB_PANIC, init_once);
+       iint_initialized = 1;
        return 0;
 }
 security_initcall(ima_iintcache_init);
index f93641382e9f9483576578a3ba5f41286f4cc3ab..e662b89d407944103dc121b9ccb37f7e68ac62e1 100644 (file)
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
        struct ima_iint_cache *iint;
        int rc;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
                return;
        mutex_lock(&iint->mutex);
+       if (!ima_initialized)
+               goto out;
        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
        if (rc < 0)
                goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
-       int rc;
+       int rc = 0;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
index b2b0998d6abda7759433d7032d255eb0a317c126..60924f6a52db2bbff40ddc953a50bb9d708febb5 100644 (file)
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void)
        keyring_r = NULL;
 
        me = current;
+       rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
        parent = me->real_parent;
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void)
                goto not_permitted;
 
        /* the keyrings must have the same UID */
-       if (pcred->tgcred->session_keyring->uid != mycred->euid ||
+       if ((pcred->tgcred->session_keyring &&
+            pcred->tgcred->session_keyring->uid != mycred->euid) ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
                goto not_permitted;
 
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void)
        set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
 
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        if (oldcred)
                put_cred(oldcred);
        return 0;
@@ -1327,6 +1330,7 @@ already_same:
        ret = 0;
 not_permitted:
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        put_cred(cred);
        return ret;
 
index c53949f17d9e0dddc0601032576ef2922fb88f86..b50f472061a43c6ec7fb53781c084fb2cb487dcb 100644 (file)
@@ -89,20 +89,12 @@ __setup("security=", choose_lsm);
  * Return true if:
  *     -The passed LSM is the one chosen by user at boot time,
  *     -or the passed LSM is configured as the default and the user did not
- *      choose an alternate LSM at boot time,
- *     -or there is no default LSM set and the user didn't specify a
- *      specific LSM and we're the first to ask for registration permission,
- *     -or the passed LSM is currently loaded.
+ *      choose an alternate LSM at boot time.
  * Otherwise, return false.
  */
 int __init security_module_enable(struct security_operations *ops)
 {
-       if (!*chosen_lsm)
-               strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX);
-       else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX))
-               return 0;
-
-       return 1;
+       return !strcmp(ops->name, chosen_lsm);
 }
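
After this simplification an LSM is enabled only when its name matches chosen_lsm, i.e. the boot-time "security=" selection (or the build-time default copied into chosen_lsm during early init). A sketch of the registration pattern an LSM init routine follows (example_ops is a placeholder, not from this patch):

    static int __init example_lsm_init(void)
    {
        /* Stand down quietly if another LSM was chosen at boot. */
        if (!security_module_enable(&example_ops))
            return 0;

        if (register_security(&example_ops))
            panic("example LSM: unable to register with kernel\n");
        return 0;
    }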
 
 /**
@@ -786,10 +778,9 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
        return security_ops->task_setrlimit(p, resource, new_rlim);
 }
 
-int security_task_setscheduler(struct task_struct *p,
-                               int policy, struct sched_param *lp)
+int security_task_setscheduler(struct task_struct *p)
 {
-       return security_ops->task_setscheduler(p, policy, lp);
+       return security_ops->task_setscheduler(p);
 }
 
 int security_task_getscheduler(struct task_struct *p)
@@ -1145,6 +1136,24 @@ void security_inet_conn_established(struct sock *sk,
        security_ops->inet_conn_established(sk, skb);
 }
 
+int security_secmark_relabel_packet(u32 secid)
+{
+       return security_ops->secmark_relabel_packet(secid);
+}
+EXPORT_SYMBOL(security_secmark_relabel_packet);
+
+void security_secmark_refcount_inc(void)
+{
+       security_ops->secmark_refcount_inc();
+}
+EXPORT_SYMBOL(security_secmark_refcount_inc);
+
+void security_secmark_refcount_dec(void)
+{
+       security_ops->secmark_refcount_dec();
+}
+EXPORT_SYMBOL(security_secmark_refcount_dec);
+
 int security_tun_dev_create(void)
 {
        return security_ops->tun_dev_create();
index 58d80f3bd6f681f6d366f5b67becd7ff433ce2c7..ad5cd76ec231cd14f02b2fb15f07a3d8a069972f 100644 (file)
@@ -2,25 +2,20 @@
 # Makefile for building the SELinux module as part of the kernel tree.
 #
 
-obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
-
-selinux-y := avc.o \
-            hooks.o \
-            selinuxfs.o \
-            netlink.o \
-            nlmsgtab.o \
-            netif.o \
-            netnode.o \
-            netport.o \
-            exports.o
+obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
+
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
+            netnode.o netport.o exports.o \
+            ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
+            ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
 
 selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
 
 selinux-$(CONFIG_NETLABEL) += netlabel.o
 
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
 
-$(obj)/avc.o: $(obj)/flask.h
+$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
 
 quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
       cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
index c0a454aee1e03cb0e3825a2c5f965c941636c4d9..90664385dead0df01f6eec2b7479c83f4f021160 100644 (file)
  * it under the terms of the GNU General Public License version 2,
  * as published by the Free Software Foundation.
  */
-#include <linux/types.h>
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/selinux.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-#include <asm/atomic.h>
 
 #include "security.h"
-#include "objsec.h"
-
-/* SECMARK reference count */
-extern atomic_t selinux_secmark_refcount;
-
-int selinux_string_to_sid(char *str, u32 *sid)
-{
-       if (selinux_enabled)
-               return security_context_to_sid(str, strlen(str), sid);
-       else {
-               *sid = 0;
-               return 0;
-       }
-}
-EXPORT_SYMBOL_GPL(selinux_string_to_sid);
-
-int selinux_secmark_relabel_packet_permission(u32 sid)
-{
-       if (selinux_enabled) {
-               const struct task_security_struct *__tsec;
-               u32 tsid;
-
-               __tsec = current_security();
-               tsid = __tsec->sid;
-
-               return avc_has_perm(tsid, sid, SECCLASS_PACKET,
-                                   PACKET__RELABELTO, NULL);
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
-
-void selinux_secmark_refcount_inc(void)
-{
-       atomic_inc(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
-
-void selinux_secmark_refcount_dec(void)
-{
-       atomic_dec(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
 
 bool selinux_is_enabled(void)
 {
index 4796ddd4e721ae454a02563d713aa235870ece02..d9154cf90ae19cd4eb5f40d65882abb60781da3d 100644 (file)
@@ -3354,11 +3354,11 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
        return 0;
 }
 
-static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp)
+static int selinux_task_setscheduler(struct task_struct *p)
 {
        int rc;
 
-       rc = cap_task_setscheduler(p, policy, lp);
+       rc = cap_task_setscheduler(p);
        if (rc)
                return rc;
 
@@ -4279,6 +4279,27 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
        selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
 }
 
+static int selinux_secmark_relabel_packet(u32 sid)
+{
+       const struct task_security_struct *__tsec;
+       u32 tsid;
+
+       __tsec = current_security();
+       tsid = __tsec->sid;
+
+       return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
+}
+
+static void selinux_secmark_refcount_inc(void)
+{
+       atomic_inc(&selinux_secmark_refcount);
+}
+
+static void selinux_secmark_refcount_dec(void)
+{
+       atomic_dec(&selinux_secmark_refcount);
+}
+
 static void selinux_req_classify_flow(const struct request_sock *req,
                                      struct flowi *fl)
 {
@@ -5533,6 +5554,9 @@ static struct security_operations selinux_ops = {
        .inet_conn_request =            selinux_inet_conn_request,
        .inet_csk_clone =               selinux_inet_csk_clone,
        .inet_conn_established =        selinux_inet_conn_established,
+       .secmark_relabel_packet =       selinux_secmark_relabel_packet,
+       .secmark_refcount_inc =         selinux_secmark_refcount_inc,
+       .secmark_refcount_dec =         selinux_secmark_refcount_dec,
        .req_classify_flow =            selinux_req_classify_flow,
        .tun_dev_create =               selinux_tun_dev_create,
        .tun_dev_post_create =          selinux_tun_dev_post_create,
index b4c9eb4bd6f9127a506e2a4483c592362c8fcafa..8858d2b2d4b6ad1dd1b005b20a06afa4a3505d03 100644 (file)
@@ -17,7 +17,7 @@ struct security_class_mapping secclass_map[] = {
          { "compute_av", "compute_create", "compute_member",
            "check_context", "load_policy", "compute_relabel",
            "compute_user", "setenforce", "setbool", "setsecparam",
-           "setcheckreqprot", NULL } },
+           "setcheckreqprot", "read_policy", NULL } },
        { "process",
          { "fork", "transition", "sigchld", "sigkill",
            "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
index 1f7c2491d3dccbc54769a6ccaf509d50255cfe3f..671273eb1115c4e7f05983af071069aea7535650 100644 (file)
@@ -9,6 +9,7 @@
 #define _SELINUX_SECURITY_H_
 
 #include <linux/magic.h>
+#include <linux/types.h>
 #include "flask.h"
 
 #define SECSID_NULL                    0x00000000 /* unspecified SID */
@@ -82,6 +83,8 @@ extern int selinux_policycap_openperm;
 int security_mls_enabled(void);
 
 int security_load_policy(void *data, size_t len);
+int security_read_policy(void **data, ssize_t *len);
+size_t security_policydb_len(void);
 
 int security_policycap_supported(unsigned int req_cap);
 
@@ -191,5 +194,25 @@ static inline int security_netlbl_sid_to_secattr(u32 sid,
 
 const char *security_get_initial_sid_context(u32 sid);
 
+/*
+ * status notifier using mmap interface
+ */
+extern struct page *selinux_kernel_status_page(void);
+
+#define SELINUX_KERNEL_STATUS_VERSION  1
+struct selinux_kernel_status {
+       u32     version;        /* version number of this structure */
+       u32     sequence;       /* sequence number of seqlock logic */
+       u32     enforcing;      /* current setting of enforcing mode */
+       u32     policyload;     /* times of policy reloaded */
+       u32     deny_unknown;   /* current setting of deny_unknown */
+       /*
+        * Versions > 0 of the structure support the members above.
+        */
+} __attribute__((packed));
+
+extern void selinux_status_update_setenforce(int enforcing);
+extern void selinux_status_update_policyload(int seqno);
+
 #endif /* _SELINUX_SECURITY_H_ */
 
index 79a1bb635662fbc7f65a306e10b5b67e534fcf1c..87e0556bae70ff977ea290b3cdfcc2c308d8edf5 100644 (file)
@@ -68,6 +68,8 @@ static int *bool_pending_values;
 static struct dentry *class_dir;
 static unsigned long last_class_ino;
 
+static char policy_opened;
+
 /* global data for policy capabilities */
 static struct dentry *policycap_dir;
 
@@ -110,6 +112,8 @@ enum sel_inos {
        SEL_COMPAT_NET, /* whether to use old compat network packet controls */
        SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
        SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
+       SEL_STATUS,     /* export current status using mmap() */
+       SEL_POLICY,     /* allow userspace to read the in-kernel policy */
        SEL_INO_NEXT,   /* The next inode number to use */
 };
 
@@ -171,6 +175,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
                if (selinux_enforcing)
                        avc_ss_reset(0);
                selnl_notify_setenforce(selinux_enforcing);
+               selinux_status_update_setenforce(selinux_enforcing);
        }
        length = count;
 out:
@@ -205,6 +210,59 @@ static const struct file_operations sel_handle_unknown_ops = {
        .llseek         = generic_file_llseek,
 };
 
+static int sel_open_handle_status(struct inode *inode, struct file *filp)
+{
+       struct page    *status = selinux_kernel_status_page();
+
+       if (!status)
+               return -ENOMEM;
+
+       filp->private_data = status;
+
+       return 0;
+}
+
+static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct page    *status = filp->private_data;
+
+       BUG_ON(!status);
+
+       return simple_read_from_buffer(buf, count, ppos,
+                                      page_address(status),
+                                      sizeof(struct selinux_kernel_status));
+}
+
+static int sel_mmap_handle_status(struct file *filp,
+                                 struct vm_area_struct *vma)
+{
+       struct page    *status = filp->private_data;
+       unsigned long   size = vma->vm_end - vma->vm_start;
+
+       BUG_ON(!status);
+
+       /* only allow mapping a single page, from the start of the file */
+       if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
+               return -EIO;
+       /* disallow writable mapping */
+       if (vma->vm_flags & VM_WRITE)
+               return -EPERM;
+       /* disallow mprotect() from later making the mapping writable */
+       vma->vm_flags &= ~VM_MAYWRITE;
+
+       return remap_pfn_range(vma, vma->vm_start,
+                              page_to_pfn(status),
+                              size, vma->vm_page_prot);
+}
+
+static const struct file_operations sel_handle_status_ops = {
+       .open           = sel_open_handle_status,
+       .read           = sel_read_handle_status,
+       .mmap           = sel_mmap_handle_status,
+       .llseek         = generic_file_llseek,
+};
+
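
The sequence member follows seqlock conventions: the kernel bumps it to an odd value before touching the page and back to an even value afterwards, so a reader retries until it observes the same even count on both sides of its reads. A hypothetical userspace consumer, assuming selinuxfs is mounted at /selinux:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct selinux_kernel_status {      /* mirrors the kernel layout above */
        uint32_t version;
        uint32_t sequence;
        uint32_t enforcing;
        uint32_t policyload;
        uint32_t deny_unknown;
    } __attribute__((packed));

    int main(void)
    {
        uint32_t seq, enforcing;
        volatile struct selinux_kernel_status *st;
        int fd = open("/selinux/status", O_RDONLY);

        if (fd < 0)
            return 1;
        st = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
        if (st == MAP_FAILED)
            return 1;

        for (;;) {
            seq = st->sequence;
            if (seq & 1)                /* writer in progress; retry */
                continue;
            enforcing = st->enforcing;
            if (st->sequence == seq)
                break;                  /* stable snapshot */
        }

        printf("enforcing=%u\n", enforcing);
        return 0;
    }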
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 static ssize_t sel_write_disable(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
@@ -296,6 +354,141 @@ static const struct file_operations sel_mls_ops = {
        .llseek         = generic_file_llseek,
 };
 
+struct policy_load_memory {
+       size_t len;
+       void *data;
+};
+
+static int sel_open_policy(struct inode *inode, struct file *filp)
+{
+       struct policy_load_memory *plm = NULL;
+       int rc;
+
+       BUG_ON(filp->private_data);
+
+       mutex_lock(&sel_mutex);
+
+       rc = task_has_security(current, SECURITY__READ_POLICY);
+       if (rc)
+               goto err;
+
+       rc = -EBUSY;
+       if (policy_opened)
+               goto err;
+
+       rc = -ENOMEM;
+       plm = kzalloc(sizeof(*plm), GFP_KERNEL);
+       if (!plm)
+               goto err;
+
+       if (i_size_read(inode) != security_policydb_len()) {
+               mutex_lock(&inode->i_mutex);
+               i_size_write(inode, security_policydb_len());
+               mutex_unlock(&inode->i_mutex);
+       }
+
+       rc = security_read_policy(&plm->data, &plm->len);
+       if (rc)
+               goto err;
+
+       policy_opened = 1;
+
+       filp->private_data = plm;
+
+       mutex_unlock(&sel_mutex);
+
+       return 0;
+err:
+       mutex_unlock(&sel_mutex);
+
+       if (plm)
+               vfree(plm->data);
+       kfree(plm);
+       return rc;
+}
+
+static int sel_release_policy(struct inode *inode, struct file *filp)
+{
+       struct policy_load_memory *plm = filp->private_data;
+
+       BUG_ON(!plm);
+
+       policy_opened = 0;
+
+       vfree(plm->data);
+       kfree(plm);
+
+       return 0;
+}
+
+static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+                              size_t count, loff_t *ppos)
+{
+       struct policy_load_memory *plm = filp->private_data;
+       int ret;
+
+       mutex_lock(&sel_mutex);
+
+       ret = task_has_security(current, SECURITY__READ_POLICY);
+       if (ret)
+               goto out;
+
+       ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+out:
+       mutex_unlock(&sel_mutex);
+       return ret;
+}
+
+static int sel_mmap_policy_fault(struct vm_area_struct *vma,
+                                struct vm_fault *vmf)
+{
+       struct policy_load_memory *plm = vma->vm_file->private_data;
+       unsigned long offset;
+       struct page *page;
+
+       if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+               return VM_FAULT_SIGBUS;
+
+       offset = vmf->pgoff << PAGE_SHIFT;
+       if (offset >= roundup(plm->len, PAGE_SIZE))
+               return VM_FAULT_SIGBUS;
+
+       page = vmalloc_to_page(plm->data + offset);
+       get_page(page);
+
+       vmf->page = page;
+
+       return 0;
+}
+
+static struct vm_operations_struct sel_mmap_policy_ops = {
+       .fault = sel_mmap_policy_fault,
+       .page_mkwrite = sel_mmap_policy_fault,
+};
+
+int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_SHARED) {
+               /* do not allow mprotect to make mapping writable */
+               vma->vm_flags &= ~VM_MAYWRITE;
+
+               if (vma->vm_flags & VM_WRITE)
+                       return -EACCES;
+       }
+
+       vma->vm_flags |= VM_RESERVED;
+       vma->vm_ops = &sel_mmap_policy_ops;
+
+       return 0;
+}
+
+static const struct file_operations sel_policy_ops = {
+       .open           = sel_open_policy,
+       .read           = sel_read_policy,
+       .mmap           = sel_mmap_policy,
+       .release        = sel_release_policy,
+};
+
 static ssize_t sel_write_load(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
 
@@ -1612,6 +1805,8 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
                [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
                [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
                [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
+               [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
+               [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR},
                /* last one */ {""}
        };
        ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
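
Together these hooks let userspace retrieve the binary policy the kernel is actually enforcing: sel_open_policy() snapshots the policydb into vmalloc'd memory, and sel_mmap_policy_fault() backs read-only mappings out of that image page by page rather than remapping it wholesale. A minimal dump via plain read(), assuming selinuxfs at /selinux (error handling abbreviated):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        int in = open("/selinux/policy", O_RDONLY);
        int out = open("policy.bin", O_WRONLY | O_CREAT | O_TRUNC, 0600);

        if (in < 0 || out < 0)
            return 1;
        while ((n = read(in, buf, sizeof(buf))) > 0)
            if (write(out, buf, n) != n)    /* sketch: no short-write loop */
                return 1;
        close(out);
        close(in);
        return 0;
    }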
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
deleted file mode 100644 (file)
index 15d4e62..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for building the SELinux security server as part of the kernel tree.
-#
-
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
-obj-y := ss.o
-
-ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
-
index 929480c6c4306e874eff82db107045b944cdd33b..a3dd9faa19c01eda269b13f7cfcd7ab6da6aa098 100644 (file)
@@ -266,8 +266,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
        if (shift > 2)
                shift = shift - 2;
        nslot = 1 << shift;
-       if (nslot > MAX_AVTAB_SIZE)
-               nslot = MAX_AVTAB_SIZE;
+       if (nslot > MAX_AVTAB_HASH_BUCKETS)
+               nslot = MAX_AVTAB_HASH_BUCKETS;
        mask = nslot - 1;
 
        h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
@@ -501,6 +501,48 @@ bad:
        goto out;
 }
 
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
+{
+       __le16 buf16[4];
+       __le32 buf32[1];
+       int rc;
+
+       buf16[0] = cpu_to_le16(cur->key.source_type);
+       buf16[1] = cpu_to_le16(cur->key.target_type);
+       buf16[2] = cpu_to_le16(cur->key.target_class);
+       buf16[3] = cpu_to_le16(cur->key.specified);
+       rc = put_entry(buf16, sizeof(u16), 4, fp);
+       if (rc)
+               return rc;
+       buf32[0] = cpu_to_le32(cur->datum.data);
+       rc = put_entry(buf32, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+       return 0;
+}
+
+int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+{
+       unsigned int i;
+       int rc = 0;
+       struct avtab_node *cur;
+       __le32 buf[1];
+
+       buf[0] = cpu_to_le32(a->nel);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < a->nslot; i++) {
+               for (cur = a->htable[i]; cur; cur = cur->next) {
+                       rc = avtab_write_item(p, cur, fp);
+                       if (rc)
+                               return rc;
+               }
+       }
+
+       return rc;
+}
 void avtab_cache_init(void)
 {
        avtab_node_cachep = kmem_cache_create("avtab_node",
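
For reference, each rule emitted by avtab_write_item() has this fixed on-disk shape (an illustrative struct, not a kernel type; all fields are little-endian in the file):

    struct avtab_disk_record {
        uint16_t source_type;
        uint16_t target_type;
        uint16_t target_class;
        uint16_t specified;     /* which kind of rule (allow, transition, ...) */
        uint32_t datum;         /* permission bits or result type */
    } __attribute__((packed));

avtab_write() prefixes the records with a single u32 count (a->nel), so a reader knows how many records follow.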
index cd4f734e27499cf2f9e203f0edb2557cf02bc24c..dff0c75345c1642fd2b3181a6677035947834951 100644 (file)
@@ -71,6 +71,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
                    void *p);
 
 int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
+int avtab_write(struct policydb *p, struct avtab *a, void *fp);
 
 struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
                                          struct avtab_datum *datum);
@@ -85,7 +87,6 @@ void avtab_cache_destroy(void);
 #define MAX_AVTAB_HASH_BITS 11
 #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
 #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
-#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
 
 #endif /* _SS_AVTAB_H_ */
 
index c91e150c3087d78127eb8ca6cc6faccf04e9cd32..655fe1c6cc69dccd862b3142a37e41b6fb4010a7 100644 (file)
@@ -490,6 +490,129 @@ err:
        return rc;
 }
 
+int cond_write_bool(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct cond_bool_datum *booldatum = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       __le32 buf[3];
+       u32 len;
+       int rc;
+
+       len = strlen(key);
+       buf[0] = cpu_to_le32(booldatum->value);
+       buf[1] = cpu_to_le32(booldatum->state);
+       buf[2] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 3, fp);
+       if (rc)
+               return rc;
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+       return 0;
+}
+
+/*
+ * cond_write_av_list doesn't write out the av_list nodes.
+ * Instead it writes out the key/value pairs from the avtab. This
+ * is necessary because there is no way to uniquely identify rules
+ * in the avtab, so it is not possible to associate individual rules
+ * in the avtab with a conditional without saving them as part of
+ * the conditional. This means that the avtab with the conditional
+ * rules will not be saved but will be rebuilt on policy load.
+ */
+static int cond_write_av_list(struct policydb *p,
+                             struct cond_av_list *list, struct policy_file *fp)
+{
+       __le32 buf[1];
+       struct cond_av_list *cur_list;
+       u32 len;
+       int rc;
+
+       len = 0;
+       for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
+               len++;
+
+       buf[0] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       if (len == 0)
+               return 0;
+
+       for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
+               rc = avtab_write_item(p, cur_list->node, fp);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+int cond_write_node(struct policydb *p, struct cond_node *node,
+                   struct policy_file *fp)
+{
+       struct cond_expr *cur_expr;
+       __le32 buf[2];
+       int rc;
+       u32 len = 0;
+
+       buf[0] = cpu_to_le32(node->cur_state);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
+               len++;
+
+       buf[0] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
+               buf[0] = cpu_to_le32(cur_expr->expr_type);
+               buf[1] = cpu_to_le32(cur_expr->bool);
+               rc = put_entry(buf, sizeof(u32), 2, fp);
+               if (rc)
+                       return rc;
+       }
+
+       rc = cond_write_av_list(p, node->true_list, fp);
+       if (rc)
+               return rc;
+       rc = cond_write_av_list(p, node->false_list, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
+{
+       struct cond_node *cur;
+       u32 len;
+       __le32 buf[1];
+       int rc;
+
+       len = 0;
+       for (cur = list; cur != NULL; cur = cur->next)
+               len++;
+       buf[0] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       for (cur = list; cur != NULL; cur = cur->next) {
+               rc = cond_write_node(p, cur, fp);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
 /* Determine whether additional permissions are granted by the conditional
  * av table, and if so, add them to the result
  */
index 53ddb013ae573f8bb053da9fa9416fbcf5ee9701..3f209c635295681f026874e332573577e743dea4 100644 (file)
@@ -69,6 +69,8 @@ int cond_index_bool(void *key, void *datum, void *datap);
 
 int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
 int cond_read_list(struct policydb *p, void *fp);
+int cond_write_bool(void *key, void *datum, void *ptr);
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
 
 void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
 
index 04b6145d767f96093423d5f733e6fe1b6a4e5687..d42951fcbe877355b08c16d5a63526c729f4355c 100644 (file)
@@ -22,6 +22,8 @@
 #include "ebitmap.h"
 #include "policydb.h"
 
+#define BITS_PER_U64   (sizeof(u64) * 8)
+
 int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
 {
        struct ebitmap_node *n1, *n2;
@@ -363,10 +365,10 @@ int ebitmap_read(struct ebitmap *e, void *fp)
        e->highbit = le32_to_cpu(buf[1]);
        count = le32_to_cpu(buf[2]);
 
-       if (mapunit != sizeof(u64) * 8) {
+       if (mapunit != BITS_PER_U64) {
                printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
                       "match my size %Zd (high bit was %d)\n",
-                      mapunit, sizeof(u64) * 8, e->highbit);
+                      mapunit, BITS_PER_U64, e->highbit);
                goto bad;
        }
 
@@ -446,3 +448,78 @@ bad:
        ebitmap_destroy(e);
        goto out;
 }
+
+int ebitmap_write(struct ebitmap *e, void *fp)
+{
+       struct ebitmap_node *n;
+       u32 count;
+       __le32 buf[3];
+       u64 map;
+       int bit, last_bit, last_startbit, rc;
+
+       buf[0] = cpu_to_le32(BITS_PER_U64);
+
+       count = 0;
+       last_bit = 0;
+       last_startbit = -1;
+       ebitmap_for_each_positive_bit(e, n, bit) {
+               if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+                       count++;
+                       last_startbit = rounddown(bit, BITS_PER_U64);
+               }
+               last_bit = roundup(bit + 1, BITS_PER_U64);
+       }
+       buf[1] = cpu_to_le32(last_bit);
+       buf[2] = cpu_to_le32(count);
+
+       rc = put_entry(buf, sizeof(u32), 3, fp);
+       if (rc)
+               return rc;
+
+       map = 0;
+       last_startbit = INT_MIN;
+       ebitmap_for_each_positive_bit(e, n, bit) {
+               if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+                       __le64 buf64[1];
+
+                       /* this is the very first bit */
+                       if (!map) {
+                               last_startbit = rounddown(bit, BITS_PER_U64);
+                               map = (u64)1 << (bit - last_startbit);
+                               continue;
+                       }
+
+                       /* write the last node */
+                       buf[0] = cpu_to_le32(last_startbit);
+                       rc = put_entry(buf, sizeof(u32), 1, fp);
+                       if (rc)
+                               return rc;
+
+                       buf64[0] = cpu_to_le64(map);
+                       rc = put_entry(buf64, sizeof(u64), 1, fp);
+                       if (rc)
+                               return rc;
+
+                       /* set up for the next node */
+                       map = 0;
+                       last_startbit = rounddown(bit, BITS_PER_U64);
+               }
+               map |= (u64)1 << (bit - last_startbit);
+       }
+       /* write the last node */
+       if (map) {
+               __le64 buf64[1];
+
+               /* write the last node */
+               buf[0] = cpu_to_le32(last_startbit);
+               rc = put_entry(buf, sizeof(u32), 1, fp);
+               if (rc)
+                       return rc;
+
+               buf64[0] = cpu_to_le64(map);
+               rc = put_entry(buf64, sizeof(u64), 1, fp);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
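
A worked example of the node packing above (illustrative numbers): serializing an ebitmap with bits {0, 3, 70} set, with BITS_PER_U64 == 64, produces

    header:  mapunit = 64, highbit = 128, count = 2
    node 0:  startbit = 0,   map = 0x0000000000000009   /* bits 0 and 3 */
    node 1:  startbit = 64,  map = 0x0000000000000040   /* bit 70 -> 70 - 64 = 6 */

since the first pass counts one node per distinct 64-bit-aligned start and records highbit as the last set bit rounded up to the next multiple of 64.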
index f283b4367f54d640e6a3b6fedb8f3180786883c3..1f4e93c2ae8695430c4a9ff33cfd31e7d36cec36 100644 (file)
@@ -123,6 +123,7 @@ int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
 int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
 void ebitmap_destroy(struct ebitmap *e);
 int ebitmap_read(struct ebitmap *e, void *fp);
+int ebitmap_write(struct ebitmap *e, void *fp);
 
 #ifdef CONFIG_NETLABEL
 int ebitmap_netlbl_export(struct ebitmap *ebmap,
index 3a29704be8ce10f4409dd0a3d4f0bea8bb0d1086..94f630d93a5c5d0964a51e030646d26652619ee6 100644 (file)
@@ -37,6 +37,7 @@
 #include "policydb.h"
 #include "conditional.h"
 #include "mls.h"
+#include "services.h"
 
 #define _DEBUG_HASHES
 
@@ -185,9 +186,19 @@ static u32 rangetr_hash(struct hashtab *h, const void *k)
 static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
 {
        const struct range_trans *key1 = k1, *key2 = k2;
-       return (key1->source_type != key2->source_type ||
-               key1->target_type != key2->target_type ||
-               key1->target_class != key2->target_class);
+       int v;
+
+       v = key1->source_type - key2->source_type;
+       if (v)
+               return v;
+
+       v = key1->target_type - key2->target_type;
+       if (v)
+               return v;
+
+       v = key1->target_class - key2->target_class;
+
+       return v;
 }
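
rangetr_cmp() now returns an ordered, memcmp-style result (negative, zero, positive), comparing source type, then target type, then target class, instead of a bare "not equal" flag. Hash lookups still treat zero as a match, while the sign now also conveys an ordering that bucket-chain code can exploit, e.g.:

    /* 0  -> keys equal (hashtab hit)
     * <0 -> k1 sorts before k2; >0 -> after */
    int v = rangetr_cmp(h, k1, k2);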
 
 /*
@@ -1624,11 +1635,11 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
 
 static int type_bounds_sanity_check(void *key, void *datum, void *datap)
 {
-       struct type_datum *upper, *type;
+       struct type_datum *upper;
        struct policydb *p = datap;
        int depth = 0;
 
-       upper = type = datum;
+       upper = datum;
        while (upper->bounds) {
                if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
                        printk(KERN_ERR "SELinux: type %s: "
@@ -2306,3 +2317,843 @@ bad:
        policydb_destroy(p);
        goto out;
 }
+
+/*
+ * Write a MLS level structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_level(struct mls_level *l, void *fp)
+{
+       __le32 buf[1];
+       int rc;
+
+       buf[0] = cpu_to_le32(l->sens);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       rc = ebitmap_write(&l->cat, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+/*
+ * Write a MLS range structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_range_helper(struct mls_range *r, void *fp)
+{
+       __le32 buf[3];
+       size_t items;
+       int rc, eq;
+
+       eq = mls_level_eq(&r->level[1], &r->level[0]);
+
+       if (eq)
+               items = 2;
+       else
+               items = 3;
+       buf[0] = cpu_to_le32(items-1);
+       buf[1] = cpu_to_le32(r->level[0].sens);
+       if (!eq)
+               buf[2] = cpu_to_le32(r->level[1].sens);
+
+       BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+       rc = put_entry(buf, sizeof(u32), items, fp);
+       if (rc)
+               return rc;
+
+       rc = ebitmap_write(&r->level[0].cat, fp);
+       if (rc)
+               return rc;
+       if (!eq) {
+               rc = ebitmap_write(&r->level[1].cat, fp);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static int sens_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct level_datum *levdatum = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       __le32 buf[2];
+       size_t len;
+       int rc;
+
+       len = strlen(key);
+       buf[0] = cpu_to_le32(len);
+       buf[1] = cpu_to_le32(levdatum->isalias);
+       rc = put_entry(buf, sizeof(u32), 2, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       rc = mls_write_level(levdatum->level, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int cat_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct cat_datum *catdatum = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       __le32 buf[3];
+       size_t len;
+       int rc;
+
+       len = strlen(key);
+       buf[0] = cpu_to_le32(len);
+       buf[1] = cpu_to_le32(catdatum->value);
+       buf[2] = cpu_to_le32(catdatum->isalias);
+       rc = put_entry(buf, sizeof(u32), 3, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int role_trans_write(struct role_trans *r, void *fp)
+{
+       struct role_trans *tr;
+       u32 buf[3];
+       size_t nel;
+       int rc;
+
+       nel = 0;
+       for (tr = r; tr; tr = tr->next)
+               nel++;
+       buf[0] = cpu_to_le32(nel);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+       for (tr = r; tr; tr = tr->next) {
+               buf[0] = cpu_to_le32(tr->role);
+               buf[1] = cpu_to_le32(tr->type);
+               buf[2] = cpu_to_le32(tr->new_role);
+               rc = put_entry(buf, sizeof(u32), 3, fp);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static int role_allow_write(struct role_allow *r, void *fp)
+{
+       struct role_allow *ra;
+       u32 buf[2];
+       size_t nel;
+       int rc;
+
+       nel = 0;
+       for (ra = r; ra; ra = ra->next)
+               nel++;
+       buf[0] = cpu_to_le32(nel);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+       for (ra = r; ra; ra = ra->next) {
+               buf[0] = cpu_to_le32(ra->role);
+               buf[1] = cpu_to_le32(ra->new_role);
+               rc = put_entry(buf, sizeof(u32), 2, fp);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
+/*
+ * Write a security context structure
+ * to a policydb binary representation file.
+ */
+static int context_write(struct policydb *p, struct context *c,
+                        void *fp)
+{
+       int rc;
+       __le32 buf[3];
+
+       buf[0] = cpu_to_le32(c->user);
+       buf[1] = cpu_to_le32(c->role);
+       buf[2] = cpu_to_le32(c->type);
+
+       rc = put_entry(buf, sizeof(u32), 3, fp);
+       if (rc)
+               return rc;
+
+       rc = mls_write_range_helper(&c->range, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+/*
+ * The following *_write functions are used to
+ * write the symbol data to a policy database
+ * binary representation file.
+ */
+
+static int perm_write(void *vkey, void *datum, void *fp)
+{
+       char *key = vkey;
+       struct perm_datum *perdatum = datum;
+       __le32 buf[2];
+       size_t len;
+       int rc;
+
+       len = strlen(key);
+       buf[0] = cpu_to_le32(len);
+       buf[1] = cpu_to_le32(perdatum->value);
+       rc = put_entry(buf, sizeof(u32), 2, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int common_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct common_datum *comdatum = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       __le32 buf[4];
+       size_t len;
+       int rc;
+
+       len = strlen(key);
+       buf[0] = cpu_to_le32(len);
+       buf[1] = cpu_to_le32(comdatum->value);
+       buf[2] = cpu_to_le32(comdatum->permissions.nprim);
+       buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
+       rc = put_entry(buf, sizeof(u32), 4, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int write_cons_helper(struct policydb *p, struct constraint_node *node,
+                            void *fp)
+{
+       struct constraint_node *c;
+       struct constraint_expr *e;
+       __le32 buf[3];
+       u32 nel;
+       int rc;
+
+       for (c = node; c; c = c->next) {
+               nel = 0;
+               for (e = c->expr; e; e = e->next)
+                       nel++;
+               buf[0] = cpu_to_le32(c->permissions);
+               buf[1] = cpu_to_le32(nel);
+               rc = put_entry(buf, sizeof(u32), 2, fp);
+               if (rc)
+                       return rc;
+               for (e = c->expr; e; e = e->next) {
+                       buf[0] = cpu_to_le32(e->expr_type);
+                       buf[1] = cpu_to_le32(e->attr);
+                       buf[2] = cpu_to_le32(e->op);
+                       rc = put_entry(buf, sizeof(u32), 3, fp);
+                       if (rc)
+                               return rc;
+
+                       switch (e->expr_type) {
+                       case CEXPR_NAMES:
+                               rc = ebitmap_write(&e->names, fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int class_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct class_datum *cladatum = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       struct policydb *p = pd->p;
+       struct constraint_node *c;
+       __le32 buf[6];
+       u32 ncons;
+       size_t len, len2;
+       int rc;
+
+       len = strlen(key);
+       if (cladatum->comkey)
+               len2 = strlen(cladatum->comkey);
+       else
+               len2 = 0;
+
+       ncons = 0;
+       for (c = cladatum->constraints; c; c = c->next)
+               ncons++;
+
+       buf[0] = cpu_to_le32(len);
+       buf[1] = cpu_to_le32(len2);
+       buf[2] = cpu_to_le32(cladatum->value);
+       buf[3] = cpu_to_le32(cladatum->permissions.nprim);
+       if (cladatum->permissions.table)
+               buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
+       else
+               buf[4] = 0;
+       buf[5] = cpu_to_le32(ncons);
+       rc = put_entry(buf, sizeof(u32), 6, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       if (cladatum->comkey) {
+               rc = put_entry(cladatum->comkey, 1, len2, fp);
+               if (rc)
+                       return rc;
+       }
+
+       rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
+       if (rc)
+               return rc;
+
+       rc = write_cons_helper(p, cladatum->constraints, fp);
+       if (rc)
+               return rc;
+
+       /* write out the validatetrans rule */
+       ncons = 0;
+       for (c = cladatum->validatetrans; c; c = c->next)
+               ncons++;
+
+       buf[0] = cpu_to_le32(ncons);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       rc = write_cons_helper(p, cladatum->validatetrans, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int role_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct role_datum *role = datum;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       struct policydb *p = pd->p;
+       __le32 buf[3];
+       size_t items, len;
+       int rc;
+
+       len = strlen(key);
+       items = 0;
+       buf[items++] = cpu_to_le32(len);
+       buf[items++] = cpu_to_le32(role->value);
+       if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+               buf[items++] = cpu_to_le32(role->bounds);
+
+       BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+       rc = put_entry(buf, sizeof(u32), items, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       rc = ebitmap_write(&role->dominates, fp);
+       if (rc)
+               return rc;
+
+       rc = ebitmap_write(&role->types, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int type_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct type_datum *typdatum = datum;
+       struct policy_data *pd = ptr;
+       struct policydb *p = pd->p;
+       void *fp = pd->fp;
+       __le32 buf[4];
+       int rc;
+       size_t items, len;
+
+       len = strlen(key);
+       items = 0;
+       buf[items++] = cpu_to_le32(len);
+       buf[items++] = cpu_to_le32(typdatum->value);
+       if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+               u32 properties = 0;
+
+               if (typdatum->primary)
+                       properties |= TYPEDATUM_PROPERTY_PRIMARY;
+
+               if (typdatum->attribute)
+                       properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
+
+               buf[items++] = cpu_to_le32(properties);
+               buf[items++] = cpu_to_le32(typdatum->bounds);
+       } else {
+               buf[items++] = cpu_to_le32(typdatum->primary);
+       }
+       BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+       rc = put_entry(buf, sizeof(u32), items, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int user_write(void *vkey, void *datum, void *ptr)
+{
+       char *key = vkey;
+       struct user_datum *usrdatum = datum;
+       struct policy_data *pd = ptr;
+       struct policydb *p = pd->p;
+       void *fp = pd->fp;
+       __le32 buf[3];
+       size_t items, len;
+       int rc;
+
+       len = strlen(key);
+       items = 0;
+       buf[items++] = cpu_to_le32(len);
+       buf[items++] = cpu_to_le32(usrdatum->value);
+       if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+               buf[items++] = cpu_to_le32(usrdatum->bounds);
+       BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+       rc = put_entry(buf, sizeof(u32), items, fp);
+       if (rc)
+               return rc;
+
+       rc = put_entry(key, 1, len, fp);
+       if (rc)
+               return rc;
+
+       rc = ebitmap_write(&usrdatum->roles, fp);
+       if (rc)
+               return rc;
+
+       rc = mls_write_range_helper(&usrdatum->range, fp);
+       if (rc)
+               return rc;
+
+       rc = mls_write_level(&usrdatum->dfltlevel, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int (*write_f[SYM_NUM]) (void *key, void *datum,
+                               void *datap) =
+{
+       common_write,
+       class_write,
+       role_write,
+       type_write,
+       user_write,
+       cond_write_bool,
+       sens_write,
+       cat_write,
+};
+
+static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+                         void *fp)
+{
+       unsigned int i, j, rc;
+       size_t nel, len;
+       __le32 buf[3];
+       u32 nodebuf[8];
+       struct ocontext *c;
+       for (i = 0; i < info->ocon_num; i++) {
+               nel = 0;
+               for (c = p->ocontexts[i]; c; c = c->next)
+                       nel++;
+               buf[0] = cpu_to_le32(nel);
+               rc = put_entry(buf, sizeof(u32), 1, fp);
+               if (rc)
+                       return rc;
+               for (c = p->ocontexts[i]; c; c = c->next) {
+                       switch (i) {
+                       case OCON_ISID:
+                               buf[0] = cpu_to_le32(c->sid[0]);
+                               rc = put_entry(buf, sizeof(u32), 1, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       case OCON_FS:
+                       case OCON_NETIF:
+                               len = strlen(c->u.name);
+                               buf[0] = cpu_to_le32(len);
+                               rc = put_entry(buf, sizeof(u32), 1, fp);
+                               if (rc)
+                                       return rc;
+                               rc = put_entry(c->u.name, 1, len, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[1], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       case OCON_PORT:
+                               buf[0] = cpu_to_le32(c->u.port.protocol);
+                               buf[1] = cpu_to_le32(c->u.port.low_port);
+                               buf[2] = cpu_to_le32(c->u.port.high_port);
+                               rc = put_entry(buf, sizeof(u32), 3, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       case OCON_NODE:
+                               nodebuf[0] = c->u.node.addr; /* network order */
+                               nodebuf[1] = c->u.node.mask; /* network order */
+                               rc = put_entry(nodebuf, sizeof(u32), 2, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       case OCON_FSUSE:
+                               buf[0] = cpu_to_le32(c->v.behavior);
+                               len = strlen(c->u.name);
+                               buf[1] = cpu_to_le32(len);
+                               rc = put_entry(buf, sizeof(u32), 2, fp);
+                               if (rc)
+                                       return rc;
+                               rc = put_entry(c->u.name, 1, len, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       case OCON_NODE6:
+                               for (j = 0; j < 4; j++)
+                                       nodebuf[j] = c->u.node6.addr[j]; /* network order */
+                               for (j = 0; j < 4; j++)
+                                       nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
+                               rc = put_entry(nodebuf, sizeof(u32), 8, fp);
+                               if (rc)
+                                       return rc;
+                               rc = context_write(p, &c->context[0], fp);
+                               if (rc)
+                                       return rc;
+                               break;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int genfs_write(struct policydb *p, void *fp)
+{
+       struct genfs *genfs;
+       struct ocontext *c;
+       size_t len;
+       __le32 buf[1];
+       int rc;
+
+       len = 0;
+       for (genfs = p->genfs; genfs; genfs = genfs->next)
+               len++;
+       buf[0] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+       for (genfs = p->genfs; genfs; genfs = genfs->next) {
+               len = strlen(genfs->fstype);
+               buf[0] = cpu_to_le32(len);
+               rc = put_entry(buf, sizeof(u32), 1, fp);
+               if (rc)
+                       return rc;
+               rc = put_entry(genfs->fstype, 1, len, fp);
+               if (rc)
+                       return rc;
+               len = 0;
+               for (c = genfs->head; c; c = c->next)
+                       len++;
+               buf[0] = cpu_to_le32(len);
+               rc = put_entry(buf, sizeof(u32), 1, fp);
+               if (rc)
+                       return rc;
+               for (c = genfs->head; c; c = c->next) {
+                       len = strlen(c->u.name);
+                       buf[0] = cpu_to_le32(len);
+                       rc = put_entry(buf, sizeof(u32), 1, fp);
+                       if (rc)
+                               return rc;
+                       rc = put_entry(c->u.name, 1, len, fp);
+                       if (rc)
+                               return rc;
+                       buf[0] = cpu_to_le32(c->v.sclass);
+                       rc = put_entry(buf, sizeof(u32), 1, fp);
+                       if (rc)
+                               return rc;
+                       rc = context_write(p, &c->context[0], fp);
+                       if (rc)
+                               return rc;
+               }
+       }
+       return 0;
+}
+
+static int range_count(void *key, void *data, void *ptr)
+{
+       int *cnt = ptr;
+       (*cnt)++;
+
+       return 0;
+}
+
+static int range_write_helper(void *key, void *data, void *ptr)
+{
+       __le32 buf[2];
+       struct range_trans *rt = key;
+       struct mls_range *r = data;
+       struct policy_data *pd = ptr;
+       void *fp = pd->fp;
+       struct policydb *p = pd->p;
+       int rc;
+
+       buf[0] = cpu_to_le32(rt->source_type);
+       buf[1] = cpu_to_le32(rt->target_type);
+       rc = put_entry(buf, sizeof(u32), 2, fp);
+       if (rc)
+               return rc;
+       if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+               buf[0] = cpu_to_le32(rt->target_class);
+               rc = put_entry(buf, sizeof(u32), 1, fp);
+               if (rc)
+                       return rc;
+       }
+       rc = mls_write_range_helper(r, fp);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+static int range_write(struct policydb *p, void *fp)
+{
+       size_t nel;
+       __le32 buf[1];
+       int rc;
+       struct policy_data pd;
+
+       pd.p = p;
+       pd.fp = fp;
+
+       /* count the number of entries in the hashtab */
+       nel = 0;
+       rc = hashtab_map(p->range_tr, range_count, &nel);
+       if (rc)
+               return rc;
+
+       buf[0] = cpu_to_le32(nel);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return rc;
+
+       /* actually write all of the entries */
+       rc = hashtab_map(p->range_tr, range_write_helper, &pd);
+       if (rc)
+               return rc;
+
+       return 0;
+}
+
+/*
+ * Write the configuration data in a policy database
+ * structure to a policy database binary representation
+ * file.
+ */
+int policydb_write(struct policydb *p, void *fp)
+{
+       unsigned int i, num_syms;
+       int rc;
+       __le32 buf[4];
+       u32 config;
+       size_t len;
+       struct policydb_compat_info *info;
+
+       /*
+        * Refuse to write a policy older than the compressed avtab format,
+        * to simplify the writer.  Other checks have been dropped because
+        * this is assumed throughout the writer code, so be careful if you
+        * ever try to remove this restriction.
+        */
+       if (p->policyvers < POLICYDB_VERSION_AVTAB) {
+               printk(KERN_ERR "SELinux: refusing to write policy version %d;"
+                      " it is less than the minimum supported version %d\n",
+                      p->policyvers, POLICYDB_VERSION_AVTAB);
+               return -EINVAL;
+       }
+
+       config = 0;
+       if (p->mls_enabled)
+               config |= POLICYDB_CONFIG_MLS;
+
+       if (p->reject_unknown)
+               config |= REJECT_UNKNOWN;
+       if (p->allow_unknown)
+               config |= ALLOW_UNKNOWN;
+
+       /* Write the magic number and string identifiers. */
+       buf[0] = cpu_to_le32(POLICYDB_MAGIC);
+       len = strlen(POLICYDB_STRING);
+       buf[1] = cpu_to_le32(len);
+       rc = put_entry(buf, sizeof(u32), 2, fp);
+       if (rc)
+               return rc;
+       rc = put_entry(POLICYDB_STRING, 1, len, fp);
+       if (rc)
+               return rc;
+
+       /* Write the version, config, and table sizes. */
+       info = policydb_lookup_compat(p->policyvers);
+       if (!info) {
+               printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
+                   "version %d\n", p->policyvers);
+               return -EINVAL;
+       }
+
+       buf[0] = cpu_to_le32(p->policyvers);
+       buf[1] = cpu_to_le32(config);
+       buf[2] = cpu_to_le32(info->sym_num);
+       buf[3] = cpu_to_le32(info->ocon_num);
+
+       rc = put_entry(buf, sizeof(u32), 4, fp);
+       if (rc)
+               return rc;
+
+       if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+               rc = ebitmap_write(&p->policycaps, fp);
+               if (rc)
+                       return rc;
+       }
+
+       if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+               rc = ebitmap_write(&p->permissive_map, fp);
+               if (rc)
+                       return rc;
+       }
+
+       num_syms = info->sym_num;
+       for (i = 0; i < num_syms; i++) {
+               struct policy_data pd;
+
+               pd.fp = fp;
+               pd.p = p;
+
+               buf[0] = cpu_to_le32(p->symtab[i].nprim);
+               buf[1] = cpu_to_le32(p->symtab[i].table->nel);
+
+               rc = put_entry(buf, sizeof(u32), 2, fp);
+               if (rc)
+                       return rc;
+               rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
+               if (rc)
+                       return rc;
+       }
+
+       rc = avtab_write(p, &p->te_avtab, fp);
+       if (rc)
+               return rc;
+
+       rc = cond_write_list(p, p->cond_list, fp);
+       if (rc)
+               return rc;
+
+       rc = role_trans_write(p->role_tr, fp);
+       if (rc)
+               return rc;
+
+       rc = role_allow_write(p->role_allow, fp);
+       if (rc)
+               return rc;
+
+       rc = ocontext_write(p, info, fp);
+       if (rc)
+               return rc;
+
+       rc = genfs_write(p, fp);
+       if (rc)
+               return rc;
+
+       rc = range_write(p, fp);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < p->p_types.nprim; i++) {
+               struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+               BUG_ON(!e);
+               rc = ebitmap_write(e, fp);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
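
The header written above is a flat little-endian stream: the magic word, the length of the identifier string, the string itself ("SE Linux"), then the version, config, and table-size words. As a rough illustration only — check_policy_header() is a hypothetical userspace helper, and the magic constant is taken from the kernel's SELINUX_MAGIC — a reader of the blob produced by policydb_write() could validate that layout like this:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POLICYDB_MAGIC  0xf97cff8cU     /* SELINUX_MAGIC */
#define POLICYDB_STRING "SE Linux"

static int check_policy_header(const unsigned char *buf, size_t size)
{
        uint32_t magic, len, vers, config;
        size_t slen = strlen(POLICYDB_STRING);

        if (size < 2 * sizeof(uint32_t))
                return -1;
        memcpy(&magic, buf, 4);
        memcpy(&len, buf + 4, 4);
        if (le32toh(magic) != POLICYDB_MAGIC || le32toh(len) != slen)
                return -1;
        /* identifier string plus version/config/sym_num/ocon_num words */
        if (size < 8 + slen + 4 * sizeof(uint32_t))
                return -1;
        if (memcmp(buf + 8, POLICYDB_STRING, slen))
                return -1;
        memcpy(&vers, buf + 8 + slen, 4);
        memcpy(&config, buf + 8 + slen + 4, 4);
        printf("policy version %u, config 0x%x\n",
               le32toh(vers), le32toh(config));
        return 0;
}
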
index 310e94442cb8b3535a8774b952de45ba0180794e..95d3d7de361e628adcc53b974533cafb384ef851 100644 (file)
@@ -254,6 +254,9 @@ struct policydb {
 
        struct ebitmap permissive_map;
 
+       /* length of this policy when it was loaded */
+       size_t len;
+
        unsigned int policyvers;
 
        unsigned int reject_unknown : 1;
@@ -270,6 +273,7 @@ extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
 extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
 extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
 extern int policydb_read(struct policydb *p, void *fp);
+extern int policydb_write(struct policydb *p, void *fp);
 
 #define PERM_SYMTAB_SIZE 32
 
@@ -290,6 +294,11 @@ struct policy_file {
        size_t len;
 };
 
+struct policy_data {
+       struct policydb *p;
+       void *fp;
+};
+
 static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
 {
        if (bytes > fp->len)
@@ -301,6 +310,17 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
        return 0;
 }
 
+static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp)
+{
+       size_t len = bytes * num;
+
+       memcpy(fp->data, buf, len);
+       fp->data += len;
+       fp->len -= len;
+
+       return 0;
+}
+
 extern u16 string_to_security_class(struct policydb *p, const char *name);
 extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
 
index 9ea2feca3cd4f7b572361543fdf1b265002cecfb..223c1ff6ef2324488eca915d2ba87bc4f6e27fa9 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/mutex.h>
 #include <linux/selinux.h>
 #include <linux/flex_array.h>
+#include <linux/vmalloc.h>
 #include <net/netlabel.h>
 
 #include "flask.h"
@@ -991,7 +992,8 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
 {
        char *scontextp;
 
-       *scontext = NULL;
+       if (scontext)
+               *scontext = NULL;
        *scontext_len = 0;
 
        if (context->len) {
@@ -1008,6 +1010,9 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
        *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
        *scontext_len += mls_compute_context_len(context);
 
+       if (!scontext)
+               return 0;
+
        /* Allocate space for the context; caller must free this space. */
        scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
        if (!scontextp)
@@ -1047,7 +1052,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
        struct context *context;
        int rc = 0;
 
-       *scontext = NULL;
+       if (scontext)
+               *scontext = NULL;
        *scontext_len  = 0;
 
        if (!ss_initialized) {
@@ -1055,6 +1061,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
                        char *scontextp;
 
                        *scontext_len = strlen(initial_sid_to_string[sid]) + 1;
+                       if (!scontext)
+                               goto out;
                        scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
                        if (!scontextp) {
                                rc = -ENOMEM;
@@ -1769,6 +1777,7 @@ int security_load_policy(void *data, size_t len)
                        return rc;
                }
 
+               policydb.len = len;
                rc = selinux_set_mapping(&policydb, secclass_map,
                                         &current_mapping,
                                         &current_mapping_size);
@@ -1791,6 +1800,7 @@ int security_load_policy(void *data, size_t len)
                selinux_complete_init();
                avc_ss_reset(seqno);
                selnl_notify_policyload(seqno);
+               selinux_status_update_policyload(seqno);
                selinux_netlbl_cache_invalidate();
                selinux_xfrm_notify_policyload();
                return 0;
@@ -1804,6 +1814,7 @@ int security_load_policy(void *data, size_t len)
        if (rc)
                return rc;
 
+       newpolicydb.len = len;
        /* If switching between different policy types, log MLS status */
        if (policydb.mls_enabled && !newpolicydb.mls_enabled)
                printk(KERN_INFO "SELinux: Disabling MLS support...\n");
@@ -1870,6 +1881,7 @@ int security_load_policy(void *data, size_t len)
 
        avc_ss_reset(seqno);
        selnl_notify_policyload(seqno);
+       selinux_status_update_policyload(seqno);
        selinux_netlbl_cache_invalidate();
        selinux_xfrm_notify_policyload();
 
@@ -1883,6 +1895,17 @@ err:
 
 }
 
+size_t security_policydb_len(void)
+{
+       size_t len;
+
+       read_lock(&policy_rwlock);
+       len = policydb.len;
+       read_unlock(&policy_rwlock);
+
+       return len;
+}
+
 /**
  * security_port_sid - Obtain the SID for a port.
  * @protocol: protocol number
@@ -2374,6 +2397,7 @@ out:
        if (!rc) {
                avc_ss_reset(seqno);
                selnl_notify_policyload(seqno);
+               selinux_status_update_policyload(seqno);
                selinux_xfrm_notify_policyload();
        }
        return rc;
@@ -3129,3 +3153,38 @@ netlbl_sid_to_secattr_failure:
        return rc;
 }
 #endif /* CONFIG_NETLABEL */
+
+/**
+ * security_read_policy - read the policy back out of the kernel.
+ * @data: returns a vmalloc_user() buffer holding the binary policy
+ * @len: returns the length of the data in bytes
+ */
+int security_read_policy(void **data, ssize_t *len)
+{
+       int rc;
+       struct policy_file fp;
+
+       if (!ss_initialized)
+               return -EINVAL;
+
+       *len = security_policydb_len();
+
+       *data = vmalloc_user(*len);
+       if (!*data)
+               return -ENOMEM;
+
+       fp.data = *data;
+       fp.len = *len;
+
+       read_lock(&policy_rwlock);
+       rc = policydb_write(&policydb, &fp);
+       read_unlock(&policy_rwlock);
+
+       if (rc)
+               return rc;
+
+       *len = (unsigned long)fp.data - (unsigned long)*data;
+       return 0;
+}
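
security_read_policy() hands back a vmalloc_user() buffer on purpose: such a buffer is zeroed and may later be mapped read-only into userspace with remap_vmalloc_range(). A hedged sketch of how a pseudo-file's mmap handler might use it — example_policy_mmap() is an assumed name, not something this patch adds:

static int example_policy_mmap(struct vm_area_struct *vma, void *policy_buf)
{
        /* the policy image should never be writable from userspace */
        if (vma->vm_flags & VM_WRITE)
                return -EACCES;
        /* legal only because the buffer came from vmalloc_user() */
        return remap_vmalloc_range(vma, policy_buf, 0);
}
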
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
new file mode 100644 (file)
index 0000000..d982365
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "services.h"
+
+/*
+ * The selinux_status_page is exposed to userspace applications via the
+ * mmap interface on /selinux/status.
+ * It allows applications to be notified of events that invalidate their
+ * userspace access vector cache, without any context switching.
+ *
+ * The selinux_kernel_status structure at the head of the status page is
+ * protected from concurrent access using seqlock logic, so userspace
+ * applications must read the status page according to that logic.
+ *
+ * Typically, an application checks status->sequence at the start of its
+ * access control routine. If the value is odd, the kernel is updating
+ * the status, so the reader should spin briefly and retry. If the value
+ * has changed since the last read, something has happened, and the
+ * application should reset its userspace avc if needed.
+ * In the common case, the application can thus confirm that the kernel
+ * status is unchanged without any system call invocations.
+ */
+static struct page *selinux_status_page;
+static DEFINE_MUTEX(selinux_status_lock);
+
+/*
+ * selinux_kernel_status_page
+ *
+ * Returns a reference to selinux_status_page, allocating the page on
+ * first use if it has not been set up yet.
+ */
+struct page *selinux_kernel_status_page(void)
+{
+       struct selinux_kernel_status   *status;
+       struct page                    *result = NULL;
+
+       mutex_lock(&selinux_status_lock);
+       if (!selinux_status_page) {
+               selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+               if (selinux_status_page) {
+                       status = page_address(selinux_status_page);
+
+                       status->version = SELINUX_KERNEL_STATUS_VERSION;
+                       status->sequence = 0;
+                       status->enforcing = selinux_enforcing;
+                       /*
+                        * NOTE: the next policyload event will set
+                        * status->policyload to a positive value; it may
+                        * not be 1, but it is never zero, so applications
+                        * can tell that the policy was updated.
+                        */
+                       status->policyload = 0;
+                       status->deny_unknown = !security_get_allow_unknown();
+               }
+       }
+       result = selinux_status_page;
+       mutex_unlock(&selinux_status_lock);
+
+       return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * Updates the status page with the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(int enforcing)
+{
+       struct selinux_kernel_status   *status;
+
+       mutex_lock(&selinux_status_lock);
+       if (selinux_status_page) {
+               status = page_address(selinux_status_page);
+
+               status->sequence++;
+               smp_wmb();
+
+               status->enforcing = enforcing;
+
+               smp_wmb();
+               status->sequence++;
+       }
+       mutex_unlock(&selinux_status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * Updates the status page with the sequence number of the latest policy
+ * load and the current setting of deny_unknown.
+ */
+void selinux_status_update_policyload(int seqno)
+{
+       struct selinux_kernel_status   *status;
+
+       mutex_lock(&selinux_status_lock);
+       if (selinux_status_page) {
+               status = page_address(selinux_status_page);
+
+               status->sequence++;
+               smp_wmb();
+
+               status->policyload = seqno;
+               status->deny_unknown = !security_get_allow_unknown();
+
+               smp_wmb();
+               status->sequence++;
+       }
+       mutex_unlock(&selinux_status_lock);
+}
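
The comment at the top of this file describes a seqlock protocol but only shows the writer side. A userspace reader sketch, assuming the field layout set up in selinux_kernel_status_page() above (the struct definition here is an illustrative mirror, not the kernel header):

#include <stdint.h>

struct selinux_kernel_status {
        uint32_t version;
        uint32_t sequence;
        uint32_t enforcing;
        uint32_t policyload;
        uint32_t deny_unknown;
};

/* returns a consistent snapshot of the mmap'ed status page */
static struct selinux_kernel_status
read_status(const volatile struct selinux_kernel_status *map)
{
        struct selinux_kernel_status snap;
        uint32_t seq;

        do {
                while ((seq = map->sequence) & 1)
                        ;               /* odd: kernel update in progress */
                __sync_synchronize();   /* read barrier, pairs with smp_wmb() */
                snap = *(const struct selinux_kernel_status *)map;
                __sync_synchronize();
        } while (map->sequence != seq);

        return snap;
}
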
index c448d57ae2b7721f72f17c5cf42e88f3f1bcba5e..bc39f4067af668874312af4187ad6f21dbdbb113 100644 (file)
@@ -1281,12 +1281,11 @@ static int smack_task_getioprio(struct task_struct *p)
  *
  * Return 0 if read access is permitted
  */
-static int smack_task_setscheduler(struct task_struct *p, int policy,
-                                  struct sched_param *lp)
+static int smack_task_setscheduler(struct task_struct *p)
 {
        int rc;
 
-       rc = cap_task_setscheduler(p, policy, lp);
+       rc = cap_task_setscheduler(p);
        if (rc == 0)
                rc = smk_curacc_on_task(p, MAY_WRITE);
        return rc;
@@ -3005,7 +3004,8 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 {
        char *sp = smack_from_secid(secid);
 
-       *secdata = sp;
+       if (secdata)
+               *secdata = sp;
        *seclen = strlen(sp);
        return 0;
 }
index ef43995119a453401dd768adfa5ae41a2602dd3a..7556315c197823e0baedcf15d0f0930c74345429 100644 (file)
@@ -768,8 +768,10 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data)
                return true; /* Do nothing if open(O_WRONLY). */
        memset(&head->r, 0, sizeof(head->r));
        head->r.print_this_domain_only = true;
-       head->r.eof = !domain;
-       head->r.domain = &domain->list;
+       if (domain)
+               head->r.domain = &domain->list;
+       else
+               head->r.eof = 1;
        tomoyo_io_printf(head, "# select %s\n", data);
        if (domain && domain->is_deleted)
                tomoyo_io_printf(head, "# This is a deleted domain.\n");
@@ -1416,15 +1418,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
        const pid_t gpid = task_pid_nr(current);
        static const int tomoyo_buffer_len = 4096;
        char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+       pid_t ppid;
        if (!buffer)
                return NULL;
        do_gettimeofday(&tv);
+       rcu_read_lock();
+       ppid = task_tgid_vnr(current->real_parent);
+       rcu_read_unlock();
        snprintf(buffer, tomoyo_buffer_len - 1,
                 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
                 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
                 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
                 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
-                (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+                task_tgid_vnr(current), ppid,
                 current_uid(), current_gid(), current_euid(),
                 current_egid(), current_suid(), current_sgid(),
                 current_fsuid(), current_fsgid());
@@ -2047,13 +2053,22 @@ void tomoyo_check_profile(void)
                const u8 profile = domain->profile;
                if (tomoyo_profile_ptr[profile])
                        continue;
+               printk(KERN_ERR "You need to define profile %u before using it.\n",
+                      profile);
+               printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+                      "for more information.\n");
                panic("Profile %u (used by '%s') not defined.\n",
                      profile, domain->domainname->name);
        }
        tomoyo_read_unlock(idx);
-       if (tomoyo_profile_version != 20090903)
+       if (tomoyo_profile_version != 20090903) {
+               printk(KERN_ERR "You need to install userland programs for "
+                      "TOMOYO 2.3 and initialize policy configuration.\n");
+               printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+                      "for more information.\n");
                panic("Profile version %u is not supported.\n",
                      tomoyo_profile_version);
+       }
        printk(KERN_INFO "TOMOYO: 2.3.0\n");
        printk(KERN_INFO "Mandatory Access Control activated.\n");
 }
index 04454cb7b24a534e84c8873a8babeb9614a8da60..7c66bd898782ce0c6fa8ea037cd19dfd78fc8bfe 100644 (file)
@@ -689,9 +689,6 @@ struct tomoyo_profile {
 
 /********** Function prototypes. **********/
 
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
 /* Check whether the given string starts with the given keyword. */
 bool tomoyo_str_starts(char **src, const char *find);
 /* Get tomoyo_realpath() of current process. */
index 070aab4901914870a0af43faef27e82e761acc32..45a818002d990f664cffadd066488a21c76fedd7 100644 (file)
@@ -31,6 +31,7 @@
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS      32
+#define MAX_CONTROL_COUNT      1028
 
 struct snd_kctl_ioctl {
        struct list_head list;          /* list of all ioctls */
@@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
        
        if (snd_BUG_ON(!control || !control->count))
                return NULL;
+
+       if (control->count > MAX_CONTROL_COUNT)
+               return NULL;
+
        kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
        if (kctl == NULL) {
                snd_printk(KERN_ERR "Cannot allocate control instance\n");
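
The new MAX_CONTROL_COUNT bound exists because control->count arrives from an ioctl, and an unchecked value lets the multiplication in the kzalloc() size wrap around, yielding an undersized allocation. A standalone demonstration of the wrap (the specific sizes are stand-ins, not the real structure sizes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t header = 64, elem = 12;                    /* stand-in sizes */
        uint32_t count = (UINT32_MAX - header) / elem + 2;  /* "user-supplied" */
        uint32_t alloc = header + elem * count;             /* wraps around */

        printf("count=%u -> alloc=%u bytes (undersized)\n", count, alloc);
        return 0;
}
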
index 204af48c5cc17f2f59632bb2089aa03c7b593664..ac242a377aea8068f859bce49aae6825aae5d001 100644 (file)
@@ -372,14 +372,17 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
                                                  struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
                snd_iprintf(buffer, "no setup\n");
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
        snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -398,20 +401,25 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
                snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
        }
 #endif
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
                                                  struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
                snd_iprintf(buffer, "no setup\n");
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
        snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -421,24 +429,29 @@ static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
        snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
        snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
        snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
                                               struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
        struct snd_pcm_status status;
        int err;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        memset(&status, 0, sizeof(status));
        err = snd_pcm_status(substream, &status);
        if (err < 0) {
                snd_iprintf(buffer, "error %d\n", err);
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
        snd_iprintf(buffer, "owner_pid   : %d\n", pid_vnr(substream->pid));
@@ -452,6 +465,8 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
        snd_iprintf(buffer, "-----\n");
        snd_iprintf(buffer, "hw_ptr      : %ld\n", runtime->status->hw_ptr);
        snd_iprintf(buffer, "appl_ptr    : %ld\n", runtime->control->appl_ptr);
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
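
The three hunks above all apply the same discipline: take pcm->open_mutex before dereferencing substream->runtime, since a concurrent close can clear it, and funnel every early exit through one unlock label. Condensed into a single hypothetical handler for clarity (example_proc_read() is not part of the patch):

static void example_proc_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        struct snd_pcm_substream *substream = entry->private_data;
        struct snd_pcm_runtime *runtime;

        mutex_lock(&substream->pcm->open_mutex);
        runtime = substream->runtime;   /* may be cleared by a racing close */
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
                goto unlock;
        }
        snd_iprintf(buffer, "state: %s\n",
                    snd_pcm_state_name(runtime->status->state));
 unlock:
        mutex_unlock(&substream->pcm->open_mutex);
}
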
index 134fc6c2e08dc01eeda84a730545b0532f0588fe..d4eb2ef8078416cc8d06e5d80f3ec3bb467a4f1b 100644 (file)
@@ -1992,6 +1992,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
                substream->ops->close(substream);
                substream->hw_opened = 0;
        }
+       if (pm_qos_request_active(&substream->latency_pm_qos_req))
+               pm_qos_remove_request(&substream->latency_pm_qos_req);
        if (substream->pcm_release) {
                substream->pcm_release(substream);
                substream->pcm_release = NULL;
index eb68326c37d47626b53d7970fe6e88027edc0e13..cbbed0db9e560315ae0559c7e5e97786387a5371 100644 (file)
@@ -535,13 +535,15 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file)
 {
        struct snd_rawmidi_file *rfile;
        struct snd_rawmidi *rmidi;
+       struct module *module;
 
        rfile = file->private_data;
        rmidi = rfile->rmidi;
        rawmidi_release_priv(rfile);
        kfree(rfile);
+       module = rmidi->card->module;
        snd_card_file_remove(rmidi->card, file);
-       module_put(rmidi->card->module);
+       module_put(module);
        return 0;
 }
 
@@ -829,6 +831,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
                
                if (get_user(device, (int __user *)argp))
                        return -EFAULT;
+               if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+                       device = SNDRV_RAWMIDI_DEVICES - 1;
                mutex_lock(&register_mutex);
                device = device < 0 ? 0 : device + 1;
                while (device < SNDRV_RAWMIDI_DEVICES) {
index 685712276ac95ab0d57985489b86f4f840bcf9b1..69cd7b3c362d19f4b0ac989b8bc107e49e59ff1b 100644 (file)
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
        return 0;
 
  _error:
-       snd_seq_oss_writeq_delete(dp->writeq);
-       snd_seq_oss_readq_delete(dp->readq);
        snd_seq_oss_synth_cleanup(dp);
        snd_seq_oss_midi_cleanup(dp);
-       delete_port(dp);
        delete_seq_queue(dp->queue);
-       kfree(dp);
+       delete_port(dp);
 
        return rc;
 }
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
 static int
 delete_port(struct seq_oss_devinfo *dp)
 {
-       if (dp->port < 0)
+       if (dp->port < 0) {
+               kfree(dp);
                return 0;
+       }
 
        debug_printk(("delete_port %i\n", dp->port));
        return snd_seq_event_port_detach(dp->cseq, dp->port);
index 1adb8a3c2b62db229f9ba0b580d930d288e74c14..42d7844ecd0bfa66ddf3db96527ccc18c79f7485 100644 (file)
@@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak)
        return 0;
 }
 #else /* !CONFIG_PROC_FS */
-static int proc_init(struct snd_akm4xxx *ak) {}
+static int proc_init(struct snd_akm4xxx *ak) { return 0; }
 #endif
 
 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
index 5f3e68401f905dbf2b8a03bc12fd657450b739d4..91d6023a63e57c6b14227e158c171b8edf4b8087 100644 (file)
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
 static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
+#ifndef MSND_CLASSIC
 static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
-#ifndef MSND_CLASSIC
 /* Extra Peripheral Configuration (Default: Disable) */
 static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
        struct snd_card *card;
        struct snd_msnd *chip;
 
-       if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+       if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+           || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+           ) {
                printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
                return -ENODEV;
        }
index 92aa762ffb7e97c998db3ca1da3a5cddc2968139..07f803e6d203a41615db0923cf073f7eadc283b6 100644 (file)
@@ -391,11 +391,11 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case SND_DEV_DSP:
        case SND_DEV_DSP16:
        case SND_DEV_AUDIO:
-               return audio_ioctl(dev, file, cmd, p);
+               ret = audio_ioctl(dev, file, cmd, p);
                break;
 
        case SND_DEV_MIDIN:
-               return MIDIbuf_ioctl(dev, file, cmd, p);
+               ret = MIDIbuf_ioctl(dev, file, cmd, p);
                break;
 
        }
index 3827092cc1d2802e0902a2c809032cac6f6ef9da..14829210ef0bf4b1bac018d4f5e31a1d1c4728b9 100644 (file)
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
                        cfg->hp_outs--;
                        memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
                                sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
-                       memmove(sequences_hp + i - 1, sequences_hp + i,
+                       memmove(sequences_hp + i, sequences_hp + i + 1,
                                sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
                }
        }
index 1053fff4bd0a7bc5b1dbe07de0c1a7a3d0df68db..34940a07905192590a056efe734e155d66f575f7 100644 (file)
@@ -126,6 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, ICH10},"
                         "{Intel, PCH},"
                         "{Intel, CPT},"
+                        "{Intel, PBG},"
                         "{Intel, SCH},"
                         "{ATI, SB450},"
                         "{ATI, SB600},"
@@ -2749,6 +2750,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
        /* CPT */
        { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+       /* PBG */
+       { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
        /* ATI SB 450/600 */
index b697fd2a6f8b8cd19a84840fd8e05369226981db..10bbbaf6ebc3d2b0531ba2fb1ab7e3f2be1f8994 100644 (file)
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
        /* Lenovo Thinkpad T61/X61 */
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
        SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+       SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
        {}
 };
 
index 4ef5efaaaef1a81d29290d5d9618097634957c10..488fd9ade1ba2bf7b306bf6e48ae106ef823294b 100644 (file)
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 μA), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x11, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+       {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+       /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+
+       {} /* terminator */
+};
+
 /* SPDIF setup */
 static void init_digital(struct hda_codec *codec)
 {
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
 
+       /* init_verb sequence for C0/C1/C2 errata*/
+       snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
        snd_hda_sequence_write(codec, cs_coef_init_verbs);
 
        if (spec->gpio_mask) {
index 5cdb80edbd7f06d0cc00db6dd43cc325a1ef7779..972e7c453b3d6c8320d58b6aebe6ef41fc259920 100644 (file)
@@ -116,6 +116,7 @@ struct conexant_spec {
        unsigned int dell_vostro:1;
        unsigned int ideapad:1;
        unsigned int thinkpad:1;
+       unsigned int hp_laptop:1;
 
        unsigned int ext_mic_present;
        unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
        }
 }
 
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+       unsigned int present;
+
+       present = snd_hda_jack_detect(codec, 0x1b);
+       snd_printdd("CXT5066: external microphone present=%d\n", present);
+       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+                           present ? 1 : 3);
+}
+
 /* toggle input of built-in digital mic and mic jack appropriately
    order is: external mic -> dock mic -> internal mic */
 static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2407,6 +2420,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
        }
 }
 
+/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+       snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+       switch (res >> 26) {
+       case CONEXANT_HP_EVENT:
+               cxt5066_hp_automute(codec);
+               break;
+       case CONEXANT_MIC_EVENT:
+               cxt5066_hp_laptop_automic(codec);
+               break;
+       }
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
        { } /* end */
 };
 
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+       {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+       {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+       {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+       { } /* end */
+};
+
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
                        cxt5066_ideapad_automic(codec);
                else if (spec->thinkpad)
                        cxt5066_thinkpad_automic(codec);
+               else if (spec->hp_laptop)
+                       cxt5066_hp_laptop_automic(codec);
        }
        cxt5066_set_mic_boost(codec);
        return 0;
@@ -3031,6 +3068,7 @@ enum {
        CXT5066_DELL_VOSTO,     /* Dell Vostro 1015i */
        CXT5066_IDEAPAD,        /* Lenovo IdeaPad U150 */
        CXT5066_THINKPAD,       /* Lenovo ThinkPad T410s, others? */
+       CXT5066_HP_LAPTOP,      /* HP Laptop */
        CXT5066_MODELS
 };
 
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
        [CXT5066_DELL_VOSTO]    = "dell-vostro",
        [CXT5066_IDEAPAD]       = "ideapad",
        [CXT5066_THINKPAD]      = "thinkpad",
+       [CXT5066_HP_LAPTOP]     = "hp-laptop",
 };
 
 static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,11 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
+       SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3158,23 @@ static int patch_cxt5066(struct hda_codec *codec)
                spec->num_init_verbs++;
                spec->dell_automute = 1;
                break;
+       case CXT5066_HP_LAPTOP:
+               codec->patch_ops.init = cxt5066_init;
+               codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+               spec->init_verbs[spec->num_init_verbs] =
+                       cxt5066_init_verbs_hp_laptop;
+               spec->num_init_verbs++;
+               spec->hp_laptop = 1;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+               /* no S/PDIF out */
+               spec->multiout.dig_out_nid = 0;
+               /* input source automatically selected */
+               spec->input_mux = NULL;
+               spec->port_d_mode = 0;
+               spec->mic_boost = 3; /* default 30dB gain */
+               break;
+
        case CXT5066_OLPC_XO_1_5:
                codec->patch_ops.init = cxt5066_olpc_init;
                codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
index 69b950d527c31d966846a3c6e8f0ff30bdbe886c..baa108b9d6aacaf88be2999aa35515d2400bb9fd 100644 (file)
@@ -84,7 +84,7 @@ static struct hda_verb nvhdmi_basic_init_7x[] = {
 #else
 /* support all rates and formats */
 #define SUPPORTED_RATES \
-       (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+       (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
        SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
         SNDRV_PCM_RATE_192000)
 #define SUPPORTED_MAXBPS       24
index 627bf99633681483242a7559a9eaef878c3f7605..a432e6efd19bbe7bbce3ad9b6607c44661f912cf 100644 (file)
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
        }
 
        if (spec->autocfg.dig_in_pin) {
-               hda_nid_t dig_nid;
-               err = snd_hda_get_connections(codec,
-                                             spec->autocfg.dig_in_pin,
-                                             &dig_nid, 1);
-               if (err > 0)
-                       spec->dig_in_nid = dig_nid;
+               dig_nid = codec->start_nid;
+               for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+                       unsigned int wcaps = get_wcaps(codec, dig_nid);
+                       if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+                               continue;
+                       if (!(wcaps & AC_WCAP_DIGITAL))
+                               continue;
+                       if (!(wcaps & AC_WCAP_CONN_LIST))
+                               continue;
+                       err = get_connection_index(codec, dig_nid,
+                                                  spec->autocfg.dig_in_pin);
+                       if (err >= 0) {
+                               spec->dig_in_nid = dig_nid;
+                               break;
+                       }
+               }
        }
 }
 
@@ -5334,6 +5344,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 
 static struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
        {}
 };
@@ -14452,6 +14463,7 @@ static void alc269_auto_init(struct hda_codec *codec)
 
 enum {
        ALC269_FIXUP_SONY_VAIO,
+       ALC269_FIXUP_DELL_M101Z,
 };
 
 static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,11 +14475,20 @@ static const struct alc_fixup alc269_fixups[] = {
        [ALC269_FIXUP_SONY_VAIO] = {
                .verbs = alc269_sony_vaio_fixup_verbs
        },
+       [ALC269_FIXUP_DELL_M101Z] = {
+               .verbs = (const struct hda_verb[]) {
+                       /* Enables internal speaker */
+                       {0x20, AC_VERB_SET_COEF_INDEX, 13},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
+                       {}
+               }
+       },
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+       SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        {}
 };
 
index 95148e58026cfb045793d3ba26d570f26108c284..c16c5ba0fda0fe61387d6924a6b84307b0dedcbb 100644 (file)
@@ -1747,6 +1747,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
                      "HP dv6", STAC_HP_DV5),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
                      "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e,
+                     "HP DV6", STAC_HP_DV5),
        SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
                      "HP", STAC_HP_DV5),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
index 289cb4dacfc79ec012b6efb174054efaa0afe855..6c0a11adb2a84511b83b23b69d5efce9fb2085c6 100644 (file)
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
                chip->model.suspend = claro_suspend;
                chip->model.resume = claro_resume;
                chip->model.set_adc_params = set_ak5385_params;
+               chip->model.device_config = PLAYBACK_0_TO_I2S |
+                                           PLAYBACK_1_TO_SPDIF |
+                                           CAPTURE_0_FROM_I2S_2 |
+                                           CAPTURE_1_FROM_SPDIF;
                break;
        }
        if (id->driver_data == MODEL_MERIDIAN ||
index 6147216af74412f5ff47af36b04ce9fd51013f6b..a3409edcfb5094791c33563f8ad3911826ce3e26 100644 (file)
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
 int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
 int oxygen_pci_resume(struct pci_dev *pci);
 #endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
 
 /* oxygen_mixer.c */
 
index fad03d64e3ad0c936aca79a257f6c484b067f657..7e93cf884437d0b5844c9515250c49994390e54c 100644 (file)
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
        }
 }
 
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
 {
-       struct oxygen *chip = card->private_data;
-
        spin_lock_irq(&chip->reg_lock);
        chip->interrupt_mask = 0;
        chip->pcm_running = 0;
        oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
        oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
        spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
        flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
 }
 EXPORT_SYMBOL(oxygen_pci_resume);
 #endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
+       chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
index f03a2f2cffee88911e9c11db08c5dd7dd8087b55..06c863e86e3d3d24a5dfd0f7b43f56a47b120719 100644 (file)
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
        .suspend = oxygen_pci_suspend,
        .resume = oxygen_pci_resume,
 #endif
+       .shutdown = oxygen_pci_shutdown,
 };
 
 static int __init alsa_card_xonar_init(void)
index dbc4b89d74e43ffa3311af7c9386fb5aae38d9a9..b82c1cfa96f5334554435a6eeebc6e82fa63857d 100644 (file)
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
        struct xonar_generic generic;
        u16 wm8776_regs[0x17];
        u16 wm8766_regs[0x10];
+       struct snd_kcontrol *line_adcmux_control;
+       struct snd_kcontrol *mic_adcmux_control;
        struct snd_kcontrol *lc_controls[13];
 };
 
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
        xonar_disable_output(chip);
+       wm8776_write(chip, WM8776_RESET, 0);
 }
 
 static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 {
        struct oxygen *chip = ctl->private_data;
        struct xonar_wm87x6 *data = chip->model_data;
+       struct snd_kcontrol *other_ctl;
        unsigned int mux_bit = ctl->private_value;
        u16 reg;
        int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
        mutex_lock(&chip->mutex);
        reg = data->wm8776_regs[WM8776_ADCMUX];
        if (value->value.integer.value[0]) {
-               reg &= ~0x003;
                reg |= mux_bit;
+               /* line-in and mic-in are exclusive */
+               mux_bit ^= 3;
+               if (reg & mux_bit) {
+                       reg &= ~mux_bit;
+                       if (mux_bit == 1)
+                               other_ctl = data->line_adcmux_control;
+                       else
+                               other_ctl = data->mic_adcmux_control;
+                       snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+                                      &other_ctl->id);
+               }
        } else
                reg &= ~mux_bit;
        changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
                err = snd_ctl_add(chip->card, ctl);
                if (err < 0)
                        return err;
+               if (!strcmp(ctl->id.name, "Line Capture Switch"))
+                       data->line_adcmux_control = ctl;
+               else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+                       data->mic_adcmux_control = ctl;
        }
+       if (!data->line_adcmux_control || !data->mic_adcmux_control)
+               return -ENXIO;
        BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
        for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
                ctl = snd_ctl_new1(&lc_controls[i], chip);
index b92adef8e81e7ae61eef5681925610b87f03a502..d6fa7bfd9aa123d7f8bb8142a8def39888129434 100644 (file)
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                if (err < 0)
                        return err;
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irqsave(&hdsp->lock, flags);
                info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
                info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
index 547b713d720449a7bdd746ca04fbf5a6cbe84931..0c98ef9156d8fd919f81711fe7b798be7c0ca7d8 100644 (file)
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 
        case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irq(&hdspm->lock);
                info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
                info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
index 2f12da4da561f6eeec98a028d7163c68e112b5dd..581a670e826192ee7267859144cc70e89f9a7a8e 100644 (file)
@@ -579,7 +579,7 @@ static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
                                  rate * delay_ms / 1000)
                * substream->runtime->channels;
 
-       pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
+       pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
                 __func__,
                 delay_ms,
                 rate,
index 1b61c23ff300be0ef2c593cf5e813e20aeb2cb5d..f1b1bc4bacfb7134d588469cbcd0b5032747f0ae 100644 (file)
@@ -94,8 +94,7 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
 
                if ((pos + len) > prtd->dma_end) {
                        len  = prtd->dma_end - pos;
-                       pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n",
-                              __func__, len);
+                       pr_debug("%s: corrected dma len %ld\n", __func__, len);
                }
 
                ret = s3c2410_dma_enqueue(prtd->params->channel,
index b823a5c9b9bc81d81b8f64f4847483eae3f253bc..87e2b7fcbf176d9f429506b285dcc2f6ac05d7bc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 
+#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
 };
 
 static struct clk siumckb_clk = {
-       .name           = "siumckb_clk",
-       .id             = -1,
        .ops            = &siumckb_clk_ops,
        .rate           = 0, /* initialised at run-time */
 };
 
+static struct clk_lookup *siumckb_lookup;
+
 static int migor_hw_params(struct snd_pcm_substream *substream,
                           struct snd_pcm_hw_params *params)
 {
@@ -180,6 +181,13 @@ static int __init migor_init(void)
        if (ret < 0)
                return ret;
 
+       siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+       if (!siumckb_lookup) {
+               ret = -ENOMEM;
+               goto eclkdevalloc;
+       }
+       clkdev_add(siumckb_lookup);
+
        /* Port number used on this machine: port B */
        migor_snd_device = platform_device_alloc("soc-audio", 1);
        if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
 epdevadd:
        platform_device_put(migor_snd_device);
 epdevalloc:
+       clkdev_drop(siumckb_lookup);
+eclkdevalloc:
        clk_unregister(&siumckb_clk);
        return ret;
 }
 
 static void __exit migor_exit(void)
 {
+       clkdev_drop(siumckb_lookup);
        clk_unregister(&siumckb_clk);
        platform_device_unregister(migor_snd_device);
 }
index adbc68ce90508221cc919121fc19e062fd309f44..f6b0d2829ea96d438d1e84550272c6a7bda80b4c 100644 (file)
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
        data[1] = (value >> 8) & 0xff;
        data[2] = value & 0xff;
 
-       if (!snd_soc_codec_volatile_register(codec, reg))
-               reg_cache[reg] = value;
+       if (!snd_soc_codec_volatile_register(codec, reg)
+               && reg < codec->reg_cache_size)
+                       reg_cache[reg] = value;
 
        if (codec->cache_only) {
                codec->cache_sync = 1;
index 9feb00c831a02b791227b75294e6dc8e15975f1f..4eabafa5b037db66b250db64cc0ce05aa783f595 100644 (file)
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
        for (idx = 0; idx < 2; idx++) {
                subs = &as->substream[idx];
                if (!subs->num_formats)
-                       return;
+                       continue;
                snd_usb_release_substream_urbs(subs, 1);
                subs->interface = -1;
        }
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        }
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+                           protocol);
+               /* fall through */
+
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
                break;
        }
-
-       default:
-               snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
-               return -EINVAL;
        }
 
        return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                        goto __error;
        }
 
-       chip->ctrl_intf = alts;
+       /*
+        * For devices with more than one control interface, we assume the
+        * first contains the audio controls. We might need a more specific
+        * check here in the future.
+        */
+       if (!chip->ctrl_intf)
+               chip->ctrl_intf = alts;
 
        if (err > 0) {
                /* create normal USB audio interfaces */
index b853f8df794f6a35ceb8e39576abf3d1cb91e36c..7754a10345451109a9ff9bd03d271148160861b9 100644 (file)
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return set_sample_rate_v1(chip, iface, alts, fmt, rate);
 
        case UAC_VERSION_2:
                return set_sample_rate_v2(chip, iface, alts, fmt, rate);
        }
-
-       return -EINVAL;
 }
 
index 1a701f1e8f501b358029f84bedcbe1268e9f543f..ef0a07e34844ae4d53ffd12e54b16a203da02a19 100644 (file)
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 
                /* get audio formats */
                switch (protocol) {
+               default:
+                       snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+                                   dev->devnum, iface_no, altno, protocol);
+                       protocol = UAC_VERSION_1;
+                       /* fall through */
+
                case UAC_VERSION_1: {
                        struct uac1_as_header_descriptor *as =
                                snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
                                   dev->devnum, iface_no, altno, as->bTerminalLink);
                        continue;
                }
-
-               default:
-                       snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
-                                  dev->devnum, iface_no, altno, protocol);
-                       continue;
                }
 
                /* get format type */
index 3a1375459c06a49d0d48a309460af30a0fcfd8c0..69148212aa70e66f9e1193040bfb41aad5548b6b 100644 (file)
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
        u64 pcm_formats;
 
        switch (protocol) {
-       case UAC_VERSION_1: {
+       case UAC_VERSION_1:
+       default: {
                struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
                sample_width = fmt->bBitResolution;
                sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
                format <<= 1;
                break;
        }
-
-       default:
-               return -EINVAL;
        }
 
        pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
         * audio class v2 uses class specific EP0 range requests for that.
         */
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1:
                fp->channels = fmt->bNrChannels;
                ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
                /* fp->channels is already set in this case */
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
        fp->channels = 1;
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1: {
                struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
                brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
        }
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        return ret;
index c166db0057d3e0e613954b21f8ca5274f0242741..3ed3901369ce1a9ca58473ecef450623cf95a7b2 100644 (file)
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
        }
 
        host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
-       mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+       switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+       case UAC_VERSION_1:
+       default:
+               mixer->protocol = UAC_VERSION_1;
+               break;
+       case UAC_VERSION_2:
+               mixer->protocol = UAC_VERSION_2;
+               break;
+       }
 
        if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
            (err = snd_usb_mixer_status_create(mixer)) < 0)
index 3634cedf93061629619125690fb19dc7d0beae64..3b5135c930628fc092cad54e67ce2a7645c59c02 100644 (file)
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return init_pitch_v1(chip, iface, alts, fmt);
 
        case UAC_VERSION_2:
                return init_pitch_v2(chip, iface, alts, fmt);
        }
-
-       return -EINVAL;
 }
 
 /*
index 5164a655c39f60b578c8642f2caabe8af66fa016..b2c63309a65165b471822e99268c828bbdb07777 100644 (file)
@@ -8,7 +8,7 @@ perf-annotate - Read perf.data (created by perf record) and display annotated co
 SYNOPSIS
 --------
 [verse]
-'perf annotate' [-i <file> | --input=file] symbol_name
+'perf annotate' [-i <file> | --input=file] [symbol_name]
 
 DESCRIPTION
 -----------
@@ -24,6 +24,13 @@ OPTIONS
 --input=::
         Input file name. (default: perf.data)
 
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface. Use of --tui requires a tty; if one is not
+       present, as when piping to other commands, the stdio interface is
+       used. This interface starts by centering on the line with the most
+       samples, and TAB/UNTAB cycle through the lines with the most samples.
+
 SEE ALSO
 --------
-linkperf:perf-record[1]
+linkperf:perf-record[1], linkperf:perf-report[1]
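
A quick sketch of the new switches in use (the command lines and the symbol
name are illustrative, not taken from the patch):

        perf annotate --stdio schedule   # force the text interface
        perf annotate --tui              # force the TUI; falls back to stdio without a tty
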
index abfabe9147a4f2a48b6fd47bfdb758de2a3f3eea..12052c9ed0babfc3a1c93cc01758ec3b7747ee10 100644 (file)
@@ -65,6 +65,13 @@ OPTIONS
                 the tree is considered as a new profiled object. +
        Default: fractal,0.5.
 
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface, which is integrated with annotate and allows
+        zooming into DSOs or threads, among other features. Use of --tui
+       requires a tty; if one is not present, as when piping to other
+       commands, the stdio interface is used.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1]
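
A matching sketch for perf report (command lines are illustrative):

        perf report --tui            # interactive browser; requires a tty
        perf report --stdio | less   # plain text output, safe to pipe
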
index 4f1fa77c1feb0b7a854ab8a85bd21682cbc66377..d1db0f676a4bf14850fa0264e78fe3d482d376dc 100644 (file)
@@ -313,6 +313,9 @@ TEST_PROGRAMS =
 
 SCRIPT_SH += perf-archive.sh
 
+grep-libs = $(filter -l%,$(1))
+strip-libs = $(filter-out -l%,$(1))
+
 #
 # No Perl scripts right now:
 #
@@ -588,14 +591,17 @@ endif
 ifdef NO_LIBPERL
        BASIC_CFLAGS += -DNO_LIBPERL
 else
-       PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
+       PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+       PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+       PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
        PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
        FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
        ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
                BASIC_CFLAGS += -DNO_LIBPERL
        else
-               ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+               ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS)
+               EXTLIBS += $(PERL_EMBED_LIBADD)
                LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
                LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
        endif
@@ -604,13 +610,16 @@ endif
 ifdef NO_LIBPYTHON
        BASIC_CFLAGS += -DNO_LIBPYTHON
 else
-       PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+       PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null)
+       PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+       PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
        PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
        FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
        ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
                BASIC_CFLAGS += -DNO_LIBPYTHON
        else
-               ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+               ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
+               EXTLIBS += $(PYTHON_EMBED_LIBADD)
                LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
                LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
        endif
@@ -653,6 +662,15 @@ else
        endif
 endif
 
+
+ifdef NO_STRLCPY
+       BASIC_CFLAGS += -DNO_STRLCPY
+else
+       ifneq ($(call try-cc,$(SOURCE_STRLCPY),),y)
+               BASIC_CFLAGS += -DNO_STRLCPY
+       endif
+endif
+
 ifndef CC_LD_DYNPATH
        ifdef NO_R_TO_GCC_LINKER
                # Some gcc does not accept and pass -R to the linker to specify
@@ -910,8 +928,8 @@ $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
                $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
 
 $(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
-       $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
-               $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
+       $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \
+               $(BUILTIN_OBJS) $(LIBS) -o $@
 
 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -1017,7 +1035,7 @@ builtin-revert.o wt-status.o: wt-status.h
 # we compile into subdirectories. If the target directory is not the source directory, they might not exist. So
 # we depend the various files onto their directories.
 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
 # In the second step, we make a rule to actually create these directories
 $(sort $(dir $(DIRECTORY_DEPS))):
        $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
index 1478dc64bf157fc0f226cba51eef80ec8646c15f..6d5604d8df9599acb55d87017f5d58e19d906395 100644 (file)
@@ -28,7 +28,7 @@
 
 static char            const *input_name = "perf.data";
 
-static bool            force;
+static bool            force, use_tui, use_stdio;
 
 static bool            full_paths;
 
@@ -321,7 +321,7 @@ static int hist_entry__tty_annotate(struct hist_entry *he)
 
 static void hists__find_annotations(struct hists *self)
 {
-       struct rb_node *first = rb_first(&self->entries), *nd = first;
+       struct rb_node *nd = rb_first(&self->entries), *next;
        int key = KEY_RIGHT;
 
        while (nd) {
@@ -343,20 +343,19 @@ find_next:
 
                if (use_browser > 0) {
                        key = hist_entry__tui_annotate(he);
-                       if (is_exit_key(key))
-                               break;
                        switch (key) {
                        case KEY_RIGHT:
-                       case '\t':
-                               nd = rb_next(nd);
+                               next = rb_next(nd);
                                break;
                        case KEY_LEFT:
-                               if (nd == first)
-                                       continue;
-                               nd = rb_prev(nd);
-                       default:
+                               next = rb_prev(nd);
                                break;
+                       default:
+                               return;
                        }
+
+                       if (next != NULL)
+                               nd = next;
                } else {
                        hist_entry__tty_annotate(he);
                        nd = rb_next(nd);
@@ -428,6 +427,8 @@ static const struct option options[] = {
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
+       OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
@@ -443,6 +444,11 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
 {
        argc = parse_options(argc, argv, options, annotate_usage, 0);
 
+       if (use_stdio)
+               use_browser = 0;
+       else if (use_tui)
+               use_browser = 1;
+
        setup_browser();
 
        symbol_conf.priv_size = sizeof(struct sym_priv);
index 55fc1f46892a6a920411db7dc91226bcbe6a7f82..5de405d452300318541338293563d8ebc41ccb87 100644 (file)
@@ -32,7 +32,7 @@
 
 static char            const *input_name = "perf.data";
 
-static bool            force;
+static bool            force, use_tui, use_stdio;
 static bool            hide_unresolved;
 static bool            dont_use_callchains;
 
@@ -107,7 +107,8 @@ static int perf_session__add_hist_entry(struct perf_session *self,
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms, data->period);
+               err = callchain_append(he->callchain, data->callchain, syms,
+                                      data->period);
                if (err)
                        goto out_free_syms;
        }
@@ -450,6 +451,8 @@ static const struct option options[] = {
                    "Show per-thread event counters"),
        OPT_STRING(0, "pretty", &pretty_printing_style, "key",
                   "pretty printing style key: normal raw"),
+       OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
                   "sort by key(s): pid, comm, dso, symbol, parent"),
        OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -482,8 +485,15 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
 {
        argc = parse_options(argc, argv, options, report_usage, 0);
 
+       if (use_stdio)
+               use_browser = 0;
+       else if (use_tui)
+               use_browser = 1;
+
        if (strcmp(input_name, "-") != 0)
                setup_browser();
+       else
+               use_browser = 0;
        /*
         * Only in the newt browser we are doing integrated annotation,
         * so don't allocate extra space that won't be used in the stdio
index 7a7b6085905382c791834b0c0f35ed5f39658e26..b253db634f04b7e8ddfddd1cc33bb3ce8343a49a 100644 (file)
@@ -110,6 +110,17 @@ int main(void)
 }
 endef
 
+define SOURCE_STRLCPY
+#include <stdlib.h>
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+
+int main(void)
+{
+       strlcpy(NULL, NULL, 0);
+       return 0;
+}
+endef
+
 # try-cc
 # Usage: option = $(call try-cc, source-to-build, cc-options)
 try-cc = $(shell sh -c                                           \
index ef7aa0a0c5265191e8120e76f9f134b5e241fa53..95aaf565c704fb6ea67cee78d49e177f0ba8f595 100644 (file)
@@ -73,6 +73,18 @@ void get_term_dimensions(struct winsize *ws);
 #define cpu_relax()    asm volatile("":::"memory")
 #endif
 
+#ifdef __mips__
+#include "../../arch/mips/include/asm/unistd.h"
+#define rmb()          asm volatile(                                   \
+                               ".set   mips2\n\t"                      \
+                               "sync\n\t"                              \
+                               ".set   mips0"                          \
+                               : /* no output */                       \
+                               : /* no input */                        \
+                               : "memory")
+#define cpu_relax()    asm volatile("" ::: "memory")
+#endif
+
 #include <time.h>
 #include <unistd.h>
 #include <sys/types.h>
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
new file mode 100644 (file)
index 0000000..d931a82
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+perf record -a -e net:net_dev_xmit -e net:net_dev_queue                \
+               -e net:netif_receive_skb -e net:netif_rx                \
+               -e skb:consume_skb -e skb:kfree_skb                     \
+               -e skb:skb_copy_datagram_iovec -e napi:napi_poll        \
+               -e irq:irq_handler_entry -e irq:irq_handler_exit        \
+               -e irq:softirq_entry -e irq:softirq_exit                \
+               -e irq:softirq_raise $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
new file mode 100644 (file)
index 0000000..c3d0a63
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+# description: display the packet processing flow and processing times
+# args: [tx] [rx] [dev=] [debug]
+
+perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
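
A hedged end-to-end example of the new script pair, run from this directory
(the workload and device name are illustrative):

        sh netdev-times-record sleep 10      # extra args are passed through to perf record
        sh netdev-times-report rx dev=eth0   # args are forwarded to netdev-times.py
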
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
new file mode 100644 (file)
index 0000000..9aa0a32
--- /dev/null
@@ -0,0 +1,464 @@
+# Display the processing flow of packets and the time spent in each stage.
+# It helps to investigate the networking stack and network devices.
+#
+# options
+# tx: show only tx chart
+# rx: show only rx chart
+# dev=: show only events related to the specified device
+# debug: run in debug mode; also show buffer status
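+#
+# example (illustrative option string):
+#   perf trace -s netdev-times.py rx dev=eth0 debug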
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+       '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+all_event_list = [] # all tracepoint events related to this script
+irq_dic = {} # key is cpu; value is a list which stacks the irqs
+             # that raise the NET_RX softirq
+net_rx_dic = {} # key is cpu; value holds the entry time of the NET_RX
+                # softirq and a list which stacks receive events
+receive_hunk_list = [] # a list of merged sequences of receive events
+rx_skb_list = [] # received packet list for matching
+                 # skb_copy_datagram_iovec
+
+buffer_budget = 65536 # the budget of rx_skb_list, tx_queue_list and
+                      # tx_xmit_list
+of_count_rx_skb_list = 0 # overflow count
+
+tx_queue_list = [] # list of packets which passed through dev_queue_xmit
+of_count_tx_queue_list = 0 # overflow count
+
+tx_xmit_list = [] # list of packets which passed through dev_hard_start_xmit
+of_count_tx_xmit_list = 0 # overflow count
+
+tx_free_list = [] # list of packets which have been freed
+
+# options
+show_tx = 0
+show_rx = 0
+dev = 0 # holds the name of the device specified by the "dev=" option
+debug = 0
+
+# indices of event_info tuple
+EINFO_IDX_NAME=   0
+EINFO_IDX_CONTEXT=1
+EINFO_IDX_CPU=    2
+EINFO_IDX_TIME=   3
+EINFO_IDX_PID=    4
+EINFO_IDX_COMM=   5
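+# e.g. (illustrative) event_info[EINFO_IDX_NAME] could be 'net__netif_rx'
+# and event_info[EINFO_IDX_TIME] the event timestamp in nsec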
+
+# Calculate a time interval (msec) from src (nsec) to dst (nsec)
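+# (illustrative: diff_msec(1000000, 4500000) returns 3.5)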
+def diff_msec(src, dst):
+       return (dst - src) / 1000000.0
+
+# Display the processing stages of a transmitted packet
+def print_transmit(hunk):
+       if dev != 0 and hunk['dev'].find(dev) < 0:
+               return
+       print "%7s %5d %6d.%06dsec %12.3fmsec      %12.3fmsec" % \
+               (hunk['dev'], hunk['len'],
+               nsecs_secs(hunk['queue_t']),
+               nsecs_nsecs(hunk['queue_t'])/1000,
+               diff_msec(hunk['queue_t'], hunk['xmit_t']),
+               diff_msec(hunk['xmit_t'], hunk['free_t']))
+
+# Format for displaying rx packet processing
+PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
+PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
+PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
+PF_JOINT=     "         |"
+PF_WJOINT=    "         |            |"
+PF_NET_RECV=  "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
+PF_NET_RX=    "         |---netif_rx(+%.3fmsec skb=%x)"
+PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
+PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
+PF_CONS_SKB=  "         |      consume_skb(+%.3fmsec)"
+
+# Display the processing of received packets and the interrupts associated
+# with a NET_RX softirq
+def print_receive(hunk):
+       show_hunk = 0
+       irq_list = hunk['irq_list']
+       cpu = irq_list[0]['cpu']
+       base_t = irq_list[0]['irq_ent_t']
+       # check if this hunk should be shown
+       if dev != 0:
+               for i in range(len(irq_list)):
+                       if irq_list[i]['name'].find(dev) >= 0:
+                               show_hunk = 1
+                               break
+       else:
+               show_hunk = 1
+       if show_hunk == 0:
+               return
+
+       print "%d.%06dsec cpu=%d" % \
+               (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+       for i in range(len(irq_list)):
+               print PF_IRQ_ENTRY % \
+                       (diff_msec(base_t, irq_list[i]['irq_ent_t']),
+                       irq_list[i]['irq'], irq_list[i]['name'])
+               print PF_JOINT
+               irq_event_list = irq_list[i]['event_list']
+               for j in range(len(irq_event_list)):
+                       irq_event = irq_event_list[j]
+                       if irq_event['event'] == 'netif_rx':
+                               print PF_NET_RX % \
+                                       (diff_msec(base_t, irq_event['time']),
+                                       irq_event['skbaddr'])
+                               print PF_JOINT
+       print PF_SOFT_ENTRY % \
+               diff_msec(base_t, hunk['sirq_ent_t'])
+       print PF_JOINT
+       event_list = hunk['event_list']
+       for i in range(len(event_list)):
+               event = event_list[i]
+               if event['event_name'] == 'napi_poll':
+                       print PF_NAPI_POLL % \
+                           (diff_msec(base_t, event['event_t']), event['dev'])
+                       if i == len(event_list) - 1:
+                               print ""
+                       else:
+                               print PF_JOINT
+               else:
+                       print PF_NET_RECV % \
+                           (diff_msec(base_t, event['event_t']), event['skbaddr'],
+                               event['len'])
+                       if 'comm' in event.keys():
+                               print PF_WJOINT
+                               print PF_CPY_DGRAM % \
+                                       (diff_msec(base_t, event['comm_t']),
+                                       event['pid'], event['comm'])
+                       elif 'handle' in event.keys():
+                               print PF_WJOINT
+                               if event['handle'] == "kfree_skb":
+                                       print PF_KFREE_SKB % \
+                                               (diff_msec(base_t,
+                                               event['comm_t']),
+                                               event['location'])
+                               elif event['handle'] == "consume_skb":
+                                       print PF_CONS_SKB % \
+                                               diff_msec(base_t,
+                                                       event['comm_t'])
+                       print PF_JOINT
+
+def trace_begin():
+       global show_tx
+       global show_rx
+       global dev
+       global debug
+
+       for i in range(len(sys.argv)):
+               if i == 0:
+                       continue
+               arg = sys.argv[i]
+               if arg == 'tx':
+                       show_tx = 1
+               elif arg =='rx':
+                       show_rx = 1
+               elif arg.find('dev=',0, 4) >= 0:
+                       dev = arg[4:]
+               elif arg == 'debug':
+                       debug = 1
+       if show_tx == 0  and show_rx == 0:
+               show_tx = 1
+               show_rx = 1
+
+def trace_end():
+       # order all events in time
+       all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
+                                           b[EINFO_IDX_TIME]))
+       # process all events
+       for i in range(len(all_event_list)):
+               event_info = all_event_list[i]
+               name = event_info[EINFO_IDX_NAME]
+               if name == 'irq__softirq_exit':
+                       handle_irq_softirq_exit(event_info)
+               elif name == 'irq__softirq_entry':
+                       handle_irq_softirq_entry(event_info)
+               elif name == 'irq__softirq_raise':
+                       handle_irq_softirq_raise(event_info)
+               elif name == 'irq__irq_handler_entry':
+                       handle_irq_handler_entry(event_info)
+               elif name == 'irq__irq_handler_exit':
+                       handle_irq_handler_exit(event_info)
+               elif name == 'napi__napi_poll':
+                       handle_napi_poll(event_info)
+               elif name == 'net__netif_receive_skb':
+                       handle_netif_receive_skb(event_info)
+               elif name == 'net__netif_rx':
+                       handle_netif_rx(event_info)
+               elif name == 'skb__skb_copy_datagram_iovec':
+                       handle_skb_copy_datagram_iovec(event_info)
+               elif name == 'net__net_dev_queue':
+                       handle_net_dev_queue(event_info)
+               elif name == 'net__net_dev_xmit':
+                       handle_net_dev_xmit(event_info)
+               elif name == 'skb__kfree_skb':
+                       handle_kfree_skb(event_info)
+               elif name == 'skb__consume_skb':
+                       handle_consume_skb(event_info)
+       # display receive hunks
+       if show_rx:
+               for i in range(len(receive_hunk_list)):
+                       print_receive(receive_hunk_list[i])
+       # display transmit hunks
+       if show_tx:
+               print "   dev    len      Qdisc        " \
+                       "       netdevice             free"
+               for i in range(len(tx_free_list)):
+                       print_transmit(tx_free_list[i])
+       if debug:
+               print "debug buffer status"
+               print "----------------------------"
+               print "xmit Qdisc:remain:%d overflow:%d" % \
+                       (len(tx_queue_list), of_count_tx_queue_list)
+               print "xmit netdevice:remain:%d overflow:%d" % \
+                       (len(tx_xmit_list), of_count_tx_xmit_list)
+               print "receive:remain:%d overflow:%d" % \
+                       (len(rx_skb_list), of_count_rx_skb_list)
+
+# called from perf when it finds a corresponding event
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
+       if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+               return
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+       all_event_list.append(event_info)
+
+def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
+                       irq, irq_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       irq, irq_name)
+       all_event_list.append(event_info)
+
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
+       all_event_list.append(event_info)
+
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       napi, dev_name)
+       all_event_list.append(event_info)
+
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+                       skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+                       skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, skblen, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, dev_name)
+       all_event_list.append(event_info)
+
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, skblen, rc, dev_name):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen, rc, dev_name)
+       all_event_list.append(event_info)
+
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+                       skbaddr, protocol, location):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, protocol, location)
+       all_event_list.append(event_info)
+
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr)
+       all_event_list.append(event_info)
+
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
+       skbaddr, skblen):
+       event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+                       skbaddr, skblen)
+       all_event_list.append(event_info)
+
+def handle_irq_handler_entry(event_info):
+       (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
+       if cpu not in irq_dic.keys():
+               irq_dic[cpu] = []
+       irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
+       irq_dic[cpu].append(irq_record)
+
+def handle_irq_handler_exit(event_info):
+       (name, context, cpu, time, pid, comm, irq, ret) = event_info
+       if cpu not in irq_dic.keys():
+               return
+       irq_record = irq_dic[cpu].pop()
+       if irq != irq_record['irq']:
+               return
+       irq_record.update({'irq_ext_t':time})
+       # if an irq doesn't include a NET_RX softirq, drop it.
+       if 'event_list' in irq_record.keys():
+               irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_raise(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       if cpu not in irq_dic.keys() \
+       or len(irq_dic[cpu]) == 0:
+               return
+       irq_record = irq_dic[cpu].pop()
+       if 'event_list' in irq_record.keys():
+               irq_event_list = irq_record['event_list']
+       else:
+               irq_event_list = []
+       irq_event_list.append({'time':time, 'event':'sirq_raise'})
+       irq_record.update({'event_list':irq_event_list})
+       irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_entry(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
+
+def handle_irq_softirq_exit(event_info):
+       (name, context, cpu, time, pid, comm, vec) = event_info
+       irq_list = []
+       event_list = 0
+       if cpu in irq_dic.keys():
+               irq_list = irq_dic[cpu]
+               del irq_dic[cpu]
+       if cpu in net_rx_dic.keys():
+               sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
+               event_list = net_rx_dic[cpu]['event_list']
+               del net_rx_dic[cpu]
+       if irq_list == [] or event_list == 0:
+               return
+       rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
+                   'irq_list':irq_list, 'event_list':event_list}
+       # merge information related to a NET_RX softirq
+       receive_hunk_list.append(rec_data)
+
+def handle_napi_poll(event_info):
+       (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
+       if cpu in net_rx_dic.keys():
+               event_list = net_rx_dic[cpu]['event_list']
+               rec_data = {'event_name':'napi_poll',
+                               'dev':dev_name, 'event_t':time}
+               event_list.append(rec_data)
+
+def handle_netif_rx(event_info):
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       if cpu not in irq_dic.keys() \
+       or len(irq_dic[cpu]) == 0:
+               return
+       irq_record = irq_dic[cpu].pop()
+       if 'event_list' in irq_record.keys():
+               irq_event_list = irq_record['event_list']
+       else:
+               irq_event_list = []
+       irq_event_list.append({'time':time, 'event':'netif_rx',
+               'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
+       irq_record.update({'event_list':irq_event_list})
+       irq_dic[cpu].append(irq_record)
+
+def handle_netif_receive_skb(event_info):
+       global of_count_rx_skb_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       if cpu in net_rx_dic.keys():
+               rec_data = {'event_name':'netif_receive_skb',
+                           'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+               event_list = net_rx_dic[cpu]['event_list']
+               event_list.append(rec_data)
+               rx_skb_list.insert(0, rec_data)
+               if len(rx_skb_list) > buffer_budget:
+                       rx_skb_list.pop()
+                       of_count_rx_skb_list += 1
+
+def handle_net_dev_queue(event_info):
+       global of_count_tx_queue_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, dev_name) = event_info
+       skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
+       tx_queue_list.insert(0, skb)
+       if len(tx_queue_list) > buffer_budget:
+               tx_queue_list.pop()
+               of_count_tx_queue_list += 1
+
+def handle_net_dev_xmit(event_info):
+       global of_count_tx_xmit_list
+
+       (name, context, cpu, time, pid, comm,
+               skbaddr, skblen, rc, dev_name) = event_info
+       if rc == 0: # NETDEV_TX_OK
+               for i in range(len(tx_queue_list)):
+                       skb = tx_queue_list[i]
+                       if skb['skbaddr'] == skbaddr:
+                               skb['xmit_t'] = time
+                               tx_xmit_list.insert(0, skb)
+                               del tx_queue_list[i]
+                               if len(tx_xmit_list) > buffer_budget:
+                                       tx_xmit_list.pop()
+                                       of_count_tx_xmit_list += 1
+                               return
+
+def handle_kfree_skb(event_info):
+       (name, context, cpu, time, pid, comm,
+               skbaddr, protocol, location) = event_info
+       for i in range(len(tx_queue_list)):
+               skb = tx_queue_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       del tx_queue_list[i]
+                       return
+       for i in range(len(tx_xmit_list)):
+               skb = tx_xmit_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       skb['free_t'] = time
+                       tx_free_list.append(skb)
+                       del tx_xmit_list[i]
+                       return
+       for i in range(len(rx_skb_list)):
+               rec_data = rx_skb_list[i]
+               if rec_data['skbaddr'] == skbaddr:
+                       rec_data.update({'handle':"kfree_skb",
+                                       'comm':comm, 'pid':pid, 'comm_t':time})
+                       del rx_skb_list[i]
+                       return
+
+def handle_consume_skb(event_info):
+       (name, context, cpu, time, pid, comm, skbaddr) = event_info
+       for i in range(len(tx_xmit_list)):
+               skb = tx_xmit_list[i]
+               if skb['skbaddr'] == skbaddr:
+                       skb['free_t'] = time
+                       tx_free_list.append(skb)
+                       del tx_xmit_list[i]
+                       return
+
+def handle_skb_copy_datagram_iovec(event_info):
+       (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
+       for i in range(len(rx_skb_list)):
+               rec_data = rx_skb_list[i]
+               if skbaddr == rec_data['skbaddr']:
+                       rec_data.update({'handle':"skb_copy_datagram_iovec",
+                                       'comm':comm, 'pid':pid, 'comm_t':time})
+                       del rx_skb_list[i]
+                       return
index 27e9ebe4076e0efbf4117a46f2dbbcc74a1dcc3e..a7729797fd96254bc35326077337a71f919c19b5 100644 (file)
@@ -82,6 +82,8 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
 extern char *perf_pathdup(const char *fmt, ...)
        __attribute__((format (printf, 1, 2)));
 
+#ifdef NO_STRLCPY
 extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
 
 #endif /* __PERF_CACHE_H */
index f231f43424d27930a286cb52902c21cb4534a068..e12d539417b2cc4644e2d5a919cfb8a23e8ae163 100644 (file)
@@ -28,6 +28,9 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
 #define chain_for_each_child(child, parent)    \
        list_for_each_entry(child, &parent->children, brothers)
 
+#define chain_for_each_child_safe(child, next, parent) \
+       list_for_each_entry_safe(child, next, &parent->children, brothers)
+
 static void
 rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
                    enum chain_mode mode)
@@ -86,10 +89,10 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
  * sort them by hit
  */
 static void
-sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
                u64 min_hit, struct callchain_param *param __used)
 {
-       __sort_chain_flat(rb_root, node, min_hit);
+       __sort_chain_flat(rb_root, &root->node, min_hit);
 }
 
 static void __sort_chain_graph_abs(struct callchain_node *node,
@@ -108,11 +111,11 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
 }
 
 static void
-sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
                     u64 min_hit, struct callchain_param *param __used)
 {
-       __sort_chain_graph_abs(chain_root, min_hit);
-       rb_root->rb_node = chain_root->rb_root.rb_node;
+       __sort_chain_graph_abs(&chain_root->node, min_hit);
+       rb_root->rb_node = chain_root->node.rb_root.rb_node;
 }
 
 static void __sort_chain_graph_rel(struct callchain_node *node,
@@ -133,11 +136,11 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
 }
 
 static void
-sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
                     u64 min_hit __used, struct callchain_param *param)
 {
-       __sort_chain_graph_rel(chain_root, param->min_percent / 100.0);
-       rb_root->rb_node = chain_root->rb_root.rb_node;
+       __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+       rb_root->rb_node = chain_root->node.rb_root.rb_node;
 }
 
 int register_callchain_param(struct callchain_param *param)
@@ -284,19 +287,18 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
 }
 
 static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start, u64 period);
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+            unsigned int start, u64 period);
 
 static void
-__append_chain_children(struct callchain_node *root,
-                       struct resolved_chain *chain,
-                       unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
+                     unsigned int start, u64 period)
 {
        struct callchain_node *rnode;
 
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start, period);
+               unsigned int ret = append_chain(rnode, chain, start, period);
 
                if (!ret)
                        goto inc_children_hit;
@@ -309,8 +311,8 @@ inc_children_hit:
 }
 
 static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start, u64 period)
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+            unsigned int start, u64 period)
 {
        struct callchain_list *cnode;
        unsigned int i = start;
@@ -357,7 +359,7 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain,
        }
 
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i, period);
+       append_chain_children(root, chain, i, period);
 
        return 0;
 }
@@ -380,8 +382,8 @@ static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
 }
 
 
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+                    struct map_symbol *syms, u64 period)
 {
        struct resolved_chain *filtered;
 
@@ -398,9 +400,65 @@ int append_chain(struct callchain_node *root, struct ip_callchain *chain,
        if (!filtered->nr)
                goto end;
 
-       __append_chain_children(root, filtered, 0, period);
+       append_chain_children(&root->node, filtered, 0, period);
+
+       if (filtered->nr > root->max_depth)
+               root->max_depth = filtered->nr;
 end:
        free(filtered);
 
        return 0;
 }
+
+static int
+merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
+                  struct resolved_chain *chain)
+{
+       struct callchain_node *child, *next_child;
+       struct callchain_list *list, *next_list;
+       int old_pos = chain->nr;
+       int err = 0;
+
+       list_for_each_entry_safe(list, next_list, &src->val, list) {
+               chain->ips[chain->nr].ip = list->ip;
+               chain->ips[chain->nr].ms = list->ms;
+               chain->nr++;
+               list_del(&list->list);
+               free(list);
+       }
+
+       if (src->hit)
+               append_chain_children(dst, chain, 0, src->hit);
+
+       chain_for_each_child_safe(child, next_child, src) {
+               err = merge_chain_branch(dst, child, chain);
+               if (err)
+                       break;
+
+               list_del(&child->brothers);
+               free(child);
+       }
+
+       chain->nr = old_pos;
+
+       return err;
+}
+
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+{
+       struct resolved_chain *chain;
+       int err;
+
+       chain = malloc(sizeof(*chain) +
+                      src->max_depth * sizeof(struct resolved_ip));
+       if (!chain)
+               return -ENOMEM;
+
+       chain->nr = 0;
+
+       err = merge_chain_branch(&dst->node, &src->node, chain);
+
+       free(chain);
+
+       return err;
+}
index 624a96c636fdbc36a472ecaadf5fcb72c226bf38..c15fb8c24ad2b87388e97cd6346cfdebaac11dd5 100644 (file)
@@ -26,9 +26,14 @@ struct callchain_node {
        u64                     children_hit;
 };
 
+struct callchain_root {
+       u64                     max_depth;
+       struct callchain_node   node;
+};
+
 struct callchain_param;
 
-typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
                                 u64, struct callchain_param *);
 
 struct callchain_param {
@@ -44,14 +49,16 @@ struct callchain_list {
        struct list_head        list;
 };
 
-static inline void callchain_init(struct callchain_node *node)
+static inline void callchain_init(struct callchain_root *root)
 {
-       INIT_LIST_HEAD(&node->brothers);
-       INIT_LIST_HEAD(&node->children);
-       INIT_LIST_HEAD(&node->val);
+       INIT_LIST_HEAD(&root->node.brothers);
+       INIT_LIST_HEAD(&root->node.children);
+       INIT_LIST_HEAD(&root->node.val);
 
-       node->parent = NULL;
-       node->hit = 0;
+       root->node.parent = NULL;
+       root->node.hit = 0;
+       root->node.children_hit = 0;
+       root->max_depth = 0;
 }
 
 static inline u64 cumul_hits(struct callchain_node *node)
@@ -60,8 +67,9 @@ static inline u64 cumul_hits(struct callchain_node *node)
 }
 
 int register_callchain_param(struct callchain_param *param);
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms, u64 period);
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+                    struct map_symbol *syms, u64 period);
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
 
 bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
 #endif /* __PERF_CALLCHAIN_H */
index be22ae6ef0558009c0a1faaa4f55bcf2c5d828ca..2022e87409942ca4b0d133c3f889e41178a663d1 100644 (file)
@@ -87,7 +87,7 @@ static void hist_entry__add_cpumode_period(struct hist_entry *self,
 
 static struct hist_entry *hist_entry__new(struct hist_entry *template)
 {
-       size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
+       size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
 
        if (self != NULL) {
@@ -226,6 +226,8 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
 
                if (!cmp) {
                        iter->period += he->period;
+                       if (symbol_conf.use_callchain)
+                               callchain_merge(iter->callchain, he->callchain);
                        hist_entry__free(he);
                        return false;
                }
index 58a470d036dd0917c16eda49fb8b1987703ca7b5..bd74977114242ff465af39a291d30aa7d463f3b2 100644 (file)
@@ -22,6 +22,7 @@ static const char *get_perf_dir(void)
        return ".";
 }
 
+#ifdef NO_STRLCPY
 size_t strlcpy(char *dest, const char *src, size_t size)
 {
        size_t ret = strlen(src);
@@ -33,7 +34,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
        }
        return ret;
 }
-
+#endif
 
 static char *get_pathname(void)
 {
index e72f05c3bef09258311f7192afb179a10657c450..fcc16e4349df9f3353ac03f975d9c1be9938863a 100644 (file)
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                goto error;
        }
        tev->point.offset = pev->point.offset;
+       tev->point.retprobe = pev->point.retprobe;
        tev->nargs = pev->nargs;
        if (tev->nargs) {
                tev->args = zalloc(sizeof(struct probe_trace_arg)
index 525136684d4ec170201b4d07c7b080b73851edf9..32b81f707ff5eb5d950a2d233d6c00c6b90fedb2 100644 (file)
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        char buf[32], *ptr;
        int ret, nscopes;
 
+       if (!is_c_varname(pf->pvar->var)) {
+               /* Copy raw parameters */
+               pf->tvar->value = strdup(pf->pvar->var);
+               if (pf->tvar->value == NULL)
+                       return -ENOMEM;
+               if (pf->pvar->type) {
+                       pf->tvar->type = strdup(pf->pvar->type);
+                       if (pf->tvar->type == NULL)
+                               return -ENOMEM;
+               }
+               if (pf->pvar->name) {
+                       pf->tvar->name = strdup(pf->pvar->name);
+                       if (pf->tvar->name == NULL)
+                               return -ENOMEM;
+               } else
+                       pf->tvar->name = NULL;
+               return 0;
+       }
+
        if (pf->pvar->name)
                pf->tvar->name = strdup(pf->pvar->name);
        else {
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        if (pf->tvar->name == NULL)
                return -ENOMEM;
 
-       if (!is_c_varname(pf->pvar->var)) {
-               /* Copy raw parameters */
-               pf->tvar->value = strdup(pf->pvar->var);
-               if (pf->tvar->value == NULL)
-                       return -ENOMEM;
-               if (pf->pvar->type) {
-                       pf->tvar->type = strdup(pf->pvar->type);
-                       if (pf->tvar->type == NULL)
-                               return -ENOMEM;
-               }
-               return 0;
-       }
-
        pr_debug("Searching '%s' variable in context.\n",
                 pf->pvar->var);
        /* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
                /* This function has no name. */
                tev->point.offset = (unsigned long)pf->addr;
 
+       /* Return probe must be on the head of a subprogram */
+       if (pf->pev->point.retprobe) {
+               if (tev->point.offset != 0) {
+                       pr_warning("Return probe must be on the head of"
+                                  " a real function\n");
+                       return -EINVAL;
+               }
+               tev->point.retprobe = true;
+       }
+
        pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
                 tev->point.offset);
 
index 46e531d09e8bfcbe1064a4307ce5a75ef72a6405..0b91053a7d11af888eea81a4c8de24fdd60ce6f8 100644 (file)
@@ -70,7 +70,7 @@ struct hist_entry {
                struct hist_entry *pair;
                struct rb_root    sorted_chain;
        };
-       struct callchain_node   callchain[0];
+       struct callchain_root   callchain[0];
 };
 
 enum sort_type {
index 1a367734e01693c8a93f79bb3846af4a89f9cffe..b39f499e575a604198bf1bb11d11d6280a091548 100644 (file)
@@ -388,6 +388,20 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
        return fprintf(fp, "%s", sbuild_id);
 }
 
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp)
+{
+       size_t ret = 0;
+       struct rb_node *nd;
+       struct symbol_name_rb_node *pos;
+
+       for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) {
+               pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+               ret += fprintf(fp, "%s\n", pos->sym.name);
+       }
+
+       return ret;
+}
+
 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
 {
        struct rb_node *nd;
@@ -2268,6 +2282,9 @@ static int setup_list(struct strlist **list, const char *list_str,
 
 int symbol__init(void)
 {
+       if (symbol_conf.initialized)
+               return 0;
+
        elf_version(EV_CURRENT);
        if (symbol_conf.sort_by_name)
                symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2310,7 @@ int symbol__init(void)
                       symbol_conf.sym_list_str, "symbol") < 0)
                goto out_free_comm_list;
 
+       symbol_conf.initialized = true;
        return 0;
 
 out_free_dso_list:
@@ -2304,11 +2322,14 @@ out_free_comm_list:
 
 void symbol__exit(void)
 {
+       if (!symbol_conf.initialized)
+               return;
        strlist__delete(symbol_conf.sym_list);
        strlist__delete(symbol_conf.dso_list);
        strlist__delete(symbol_conf.comm_list);
        vmlinux_path__exit();
        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+       symbol_conf.initialized = false;
 }
 
 int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
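
Editor's note: the symbol__init()/symbol__exit() changes above are the classic idempotence guard: a boolean records whether the subsystem is live, so a second init becomes a no-op and exit is safe even when init never ran. A compact sketch of the same shape, under hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

static bool initialized;
static char *table;

int subsys__init(void)
{
        if (initialized)
                return 0;               /* repeated init is a no-op */
        table = calloc(1024, 1);
        if (table == NULL)
                return -1;
        initialized = true;
        return 0;
}

void subsys__exit(void)
{
        if (!initialized)
                return;                 /* exit without init is safe too */
        free(table);
        table = NULL;
        initialized = false;
}
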
index b7a8da4af5a0a98e84e1616e55cdaceec18fcdbf..038f2201ee09579ca3f460d9f59576770ea477d2 100644 (file)
@@ -69,7 +69,8 @@ struct symbol_conf {
                        show_nr_samples,
                        use_callchain,
                        exclude_other,
-                       show_cpu_utilization;
+                       show_cpu_utilization,
+                       initialized;
        const char      *vmlinux_name,
                        *source_prefix,
                        *field_sep;
@@ -181,6 +182,7 @@ size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
 
 size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp);
 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
 
 enum dso_origin {
index 7ea983acfaea521a425f7eb33250e319b0a3a960..f7af2fca965d5c206973f73ccd25b8b3607e02e5 100644 (file)
@@ -97,7 +97,7 @@ void setup_python_scripting(void)
        register_python_scripting(&python_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops python_scripting_ops;
+extern struct scripting_ops python_scripting_ops;
 
 void setup_python_scripting(void)
 {
@@ -158,7 +158,7 @@ void setup_perl_scripting(void)
        register_perl_scripting(&perl_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops perl_scripting_ops;
+extern struct scripting_ops perl_scripting_ops;
 
 void setup_perl_scripting(void)
 {
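
Editor's note: both hunks above fix the same linkage bug: without extern, every translation unit that compiled this #else branch emitted its own definition of the ops struct, clashing with the real one provided by the scripting backends. A one-file illustration of the declaration/definition distinction, with hypothetical names:

struct ops { int (*start)(void); };

extern struct ops perl_ops;     /* declaration: no storage reserved here */
struct ops perl_ops;            /* the single real (zero-initialized) definition */

int main(void)
{
        return perl_ops.start != 0;     /* start is NULL, so returns 0 */
}
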
index 66f2d583d8c4326971dc9d37cd47df57eeb82306..6d0df809a2edab24f28af4bea093d1cda3c2614d 100644 (file)
@@ -1,16 +1,6 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#undef _GNU_SOURCE
-/*
- * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
- * the build if it isn't defined. Use the equivalent one that glibc
- * has on features.h.
- */
-#include <features.h>
-#ifndef HAVE_LONG_LONG
-#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
-#endif
 #include <slang.h>
+#include "libslang.h"
+#include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <stdlib.h>
@@ -19,17 +9,9 @@
 #include "helpline.h"
 #include "../color.h"
 #include "../util.h"
+#include <stdio.h>
 
-#if SLANG_VERSION < 20104
-#define sltt_set_color(obj, name, fg, bg) \
-       SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
-#else
-#define sltt_set_color SLtt_set_color
-#endif
-
-newtComponent newt_form__new(void);
-
-int ui_browser__percent_color(double percent, bool current)
+static int ui_browser__percent_color(double percent, bool current)
 {
        if (current)
                return HE_COLORSET_SELECTED;
@@ -40,6 +22,23 @@ int ui_browser__percent_color(double percent, bool current)
        return HE_COLORSET_NORMAL;
 }
 
+void ui_browser__set_color(struct ui_browser *self __used, int color)
+{
+       SLsmg_set_color(color);
+}
+
+void ui_browser__set_percent_color(struct ui_browser *self,
+                                  double percent, bool current)
+{
+       int color = ui_browser__percent_color(percent, current);
+       ui_browser__set_color(self, color);
+}
+
+void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+{
+       SLsmg_gotorc(self->y + y, self->x + x);
+}
+
 void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
 {
        struct list_head *head = self->entries;
@@ -111,7 +110,7 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
        nd = self->top;
 
        while (nd != NULL) {
-               SLsmg_gotorc(self->y + row, self->x);
+               ui_browser__gotorc(self, row, 0);
                self->write(self, nd, row);
                if (++row == self->height)
                        break;
@@ -131,13 +130,10 @@ void ui_browser__refresh_dimensions(struct ui_browser *self)
        int cols, rows;
        newtGetScreenSize(&cols, &rows);
 
-       if (self->width > cols - 4)
-               self->width = cols - 4;
-       self->height = rows - 5;
-       if (self->height > self->nr_entries)
-               self->height = self->nr_entries;
-       self->y  = (rows - self->height) / 2;
-       self->x = (cols - self->width) / 2;
+       self->width = cols - 1;
+       self->height = rows - 2;
+       self->y = 1;
+       self->x = 0;
 }
 
 void ui_browser__reset_index(struct ui_browser *self)
@@ -146,34 +142,48 @@ void ui_browser__reset_index(struct ui_browser *self)
        self->seek(self, 0, SEEK_SET);
 }
 
+void ui_browser__add_exit_key(struct ui_browser *self, int key)
+{
+       newtFormAddHotKey(self->form, key);
+}
+
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
+{
+       int i = 0;
+
+       while (i < 64 && keys[i]) {
+               ui_browser__add_exit_key(self, keys[i]);
+               ++i;
+       }
+}
+
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...)
 {
        va_list ap;
+       int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP,
+                      NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ',
+                      NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 };
 
-       if (self->form != NULL) {
+       if (self->form != NULL)
                newtFormDestroy(self->form);
-               newtPopWindow();
-       }
+
        ui_browser__refresh_dimensions(self);
-       newtCenteredWindow(self->width, self->height, title);
-       self->form = newt_form__new();
+       self->form = newtForm(NULL, NULL, 0);
        if (self->form == NULL)
                return -1;
 
-       self->sb = newtVerticalScrollbar(self->width, 0, self->height,
+       self->sb = newtVerticalScrollbar(self->width, 1, self->height,
                                         HE_COLORSET_NORMAL,
                                         HE_COLORSET_SELECTED);
        if (self->sb == NULL)
                return -1;
 
-       newtFormAddHotKey(self->form, NEWT_KEY_UP);
-       newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
-       newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
-       newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
-       newtFormAddHotKey(self->form, NEWT_KEY_HOME);
-       newtFormAddHotKey(self->form, NEWT_KEY_END);
-       newtFormAddHotKey(self->form, ' ');
+       SLsmg_gotorc(0, 0);
+       ui_browser__set_color(self, NEWT_COLORSET_ROOT);
+       slsmg_write_nstring(title, self->width);
+
+       ui_browser__add_exit_keys(self, keys);
        newtFormAddComponent(self->form, self->sb);
 
        va_start(ap, helpline);
@@ -185,7 +195,6 @@ int ui_browser__show(struct ui_browser *self, const char *title,
 void ui_browser__hide(struct ui_browser *self)
 {
        newtFormDestroy(self->form);
-       newtPopWindow();
        self->form = NULL;
        ui_helpline__pop();
 }
@@ -196,28 +205,28 @@ int ui_browser__refresh(struct ui_browser *self)
 
        newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
        row = self->refresh(self);
-       SLsmg_set_color(HE_COLORSET_NORMAL);
+       ui_browser__set_color(self, HE_COLORSET_NORMAL);
        SLsmg_fill_region(self->y + row, self->x,
                          self->height - row, self->width, ' ');
 
        return 0;
 }
 
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
+int ui_browser__run(struct ui_browser *self)
 {
+       struct newtExitStruct es;
+
        if (ui_browser__refresh(self) < 0)
                return -1;
 
        while (1) {
                off_t offset;
 
-               newtFormRun(self->form, es);
+               newtFormRun(self->form, &es);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
+               if (es.reason != NEWT_EXIT_HOTKEY)
                        break;
-               if (is_exit_key(es->u.key))
-                       return es->u.key;
-               switch (es->u.key) {
+               switch (es.u.key) {
                case NEWT_KEY_DOWN:
                        if (self->index == self->nr_entries - 1)
                                break;
@@ -274,12 +283,12 @@ int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
                        self->seek(self, -offset, SEEK_END);
                        break;
                default:
-                       return es->u.key;
+                       return es.u.key;
                }
                if (ui_browser__refresh(self) < 0)
                        return -1;
        }
-       return 0;
+       return -1;
 }
 
 unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
@@ -294,7 +303,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
        pos = self->top;
 
        list_for_each_from(pos, head) {
-               SLsmg_gotorc(self->y + row, self->x);
+               ui_browser__gotorc(self, row, 0);
                self->write(self, pos, row);
                if (++row == self->height)
                        break;
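
Editor's note: ui_browser__add_exit_keys() above walks a zero-terminated key array, which is what lets callers pass a compact literal like the keys[] table in ui_browser__show(). A standalone sketch of that convention, with a stub standing in for newtFormAddHotKey():

#include <stdio.h>

static void add_exit_key(int key)
{
        printf("registering hotkey %d\n", key); /* stand-in for newtFormAddHotKey() */
}

static void add_exit_keys(int keys[])
{
        int i = 0;

        /* check the bound before reading the sentinel slot */
        while (i < 64 && keys[i]) {
                add_exit_key(keys[i]);
                ++i;
        }
}

int main(void)
{
        int keys[] = { 'q', 27 /* ESC */, ' ', 0 };

        add_exit_keys(keys);
        return 0;
}
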
index 0b9f829214f756ec16227745835cfea55d7ee503..0dc7e4da36f52c42ef3574dc89dce7102ae8438d 100644 (file)
@@ -25,16 +25,21 @@ struct ui_browser {
 };
 
 
-int ui_browser__percent_color(double percent, bool current);
+void ui_browser__set_color(struct ui_browser *self, int color);
+void ui_browser__set_percent_color(struct ui_browser *self,
+                                  double percent, bool current);
 bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
 void ui_browser__refresh_dimensions(struct ui_browser *self);
 void ui_browser__reset_index(struct ui_browser *self);
 
+void ui_browser__gotorc(struct ui_browser *self, int y, int x);
+void ui_browser__add_exit_key(struct ui_browser *self, int key);
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
 int ui_browser__show(struct ui_browser *self, const char *title,
                     const char *helpline, ...);
 void ui_browser__hide(struct ui_browser *self);
 int ui_browser__refresh(struct ui_browser *self);
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es);
+int ui_browser__run(struct ui_browser *self);
 
 void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
 unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
index a90273e63f4fb6939ea64e074513e1afabb1f289..82b78f99251bb2b764165cf8066a85f1e6e4b97d 100644 (file)
@@ -40,14 +40,12 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
 
        if (ol->offset != -1) {
                struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
-               int color = ui_browser__percent_color(olrb->percent, current_entry);
-               SLsmg_set_color(color);
+               ui_browser__set_percent_color(self, olrb->percent, current_entry);
                slsmg_printf(" %7.2f ", olrb->percent);
                if (!current_entry)
-                       SLsmg_set_color(HE_COLORSET_CODE);
+                       ui_browser__set_color(self, HE_COLORSET_CODE);
        } else {
-               int color = ui_browser__percent_color(0, current_entry);
-               SLsmg_set_color(color);
+               ui_browser__set_percent_color(self, 0, current_entry);
                slsmg_write_nstring(" ", 9);
        }
 
@@ -135,32 +133,31 @@ static void annotate_browser__set_top(struct annotate_browser *self,
        self->curr_hot = nd;
 }
 
-static int annotate_browser__run(struct annotate_browser *self,
-                                struct newtExitStruct *es)
+static int annotate_browser__run(struct annotate_browser *self)
 {
        struct rb_node *nd;
        struct hist_entry *he = self->b.priv;
+       int key;
 
        if (ui_browser__show(&self->b, he->ms.sym->name,
-                            "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
+                            "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
                return -1;
-
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
+       /*
+        * Let builtin-annotate cycle through multiple symbols by
+        * examining the exit key returned by this function.
+        */
+       ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
 
        nd = self->curr_hot;
        if (nd) {
-               newtFormAddHotKey(self->b.form, NEWT_KEY_TAB);
-               newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB);
+               int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
+               ui_browser__add_exit_keys(&self->b, tabs);
        }
 
        while (1) {
-               ui_browser__run(&self->b, es);
-
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
+               key = ui_browser__run(&self->b);
 
-               switch (es->u.key) {
+               switch (key) {
                case NEWT_KEY_TAB:
                        nd = rb_prev(nd);
                        if (nd == NULL)
@@ -179,12 +176,11 @@ static int annotate_browser__run(struct annotate_browser *self,
        }
 out:
        ui_browser__hide(&self->b);
-       return es->u.key;
+       return key;
 }
 
 int hist_entry__tui_annotate(struct hist_entry *self)
 {
-       struct newtExitStruct es;
        struct objdump_line *pos, *n;
        struct objdump_line_rb_node *rbpos;
        LIST_HEAD(head);
@@ -232,7 +228,7 @@ int hist_entry__tui_annotate(struct hist_entry *self)
                annotate_browser__set_top(&browser, browser.curr_hot);
 
        browser.b.width += 18; /* Percentage */
-       ret = annotate_browser__run(&browser, &es);
+       ret = annotate_browser__run(&browser);
        list_for_each_entry_safe(pos, n, &head, node) {
                list_del(&pos->node);
                objdump_line__free(pos);
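
Editor's note: the conversion above changes the contract: annotate_browser__run() no longer fills a newtExitStruct but simply returns the key that ended the run, so hist_entry__tui_annotate() and, one level up, builtin-annotate can dispatch on it. A skeletal model of that calling convention, with made-up key codes and a stubbed inner loop:

#include <stdio.h>

enum { KEY_TAB = 9, KEY_RIGHT = 1002 };

/* Stub for ui_browser__run(): pretend the user pressed -> */
static int browser__run(void)
{
        return KEY_RIGHT;
}

static int annotate__run(void)
{
        for (;;) {
                int key = browser__run();

                switch (key) {
                case KEY_TAB:
                        /* cycle to another sample line and keep running */
                        continue;
                default:
                        return key;     /* hand unhandled keys to the caller */
                }
        }
}

int main(void)
{
        printf("browser exited with key %d\n", annotate__run());
        return 0;
}
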
index dafdf6775d77f44d69abf1980b1a9cfe4ab053dc..ebda8c3fde9e6468ddbd84fc2df7e324ba862854 100644 (file)
@@ -58,6 +58,11 @@ static char callchain_list__folded(const struct callchain_list *self)
        return map_symbol__folded(&self->ms);
 }
 
+static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+{
+       self->unfolded = unfold ? self->has_children : false;
+}
+
 static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
 {
        int n = 0;
@@ -129,16 +134,16 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se
        for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
                struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
                struct callchain_list *chain;
-               int first = true;
+               bool first = true;
 
                list_for_each_entry(chain, &child->val, list) {
                        if (first) {
                                first = false;
                                chain->ms.has_children = chain->list.next != &child->val ||
-                                                        rb_first(&child->rb_root) != NULL;
+                                                        !RB_EMPTY_ROOT(&child->rb_root);
                        } else
                                chain->ms.has_children = chain->list.next == &child->val &&
-                                                        rb_first(&child->rb_root) != NULL;
+                                                        !RB_EMPTY_ROOT(&child->rb_root);
                }
 
                callchain_node__init_have_children_rb_tree(child);
@@ -150,7 +155,7 @@ static void callchain_node__init_have_children(struct callchain_node *self)
        struct callchain_list *chain;
 
        list_for_each_entry(chain, &self->val, list)
-               chain->ms.has_children = rb_first(&self->rb_root) != NULL;
+               chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
 
        callchain_node__init_have_children_rb_tree(self);
 }
@@ -168,6 +173,7 @@ static void callchain__init_have_children(struct rb_root *self)
 static void hist_entry__init_have_children(struct hist_entry *self)
 {
        if (!self->init_have_children) {
+               self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
                callchain__init_have_children(&self->sorted_chain);
                self->init_have_children = true;
        }
@@ -195,43 +201,114 @@ static bool hist_browser__toggle_fold(struct hist_browser *self)
        return false;
 }
 
-static int hist_browser__run(struct hist_browser *self, const char *title,
-                            struct newtExitStruct *es)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+{
+       int n = 0;
+       struct rb_node *nd;
+
+       for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+               struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+               struct callchain_list *chain;
+               bool has_children = false;
+
+               list_for_each_entry(chain, &child->val, list) {
+                       ++n;
+                       map_symbol__set_folding(&chain->ms, unfold);
+                       has_children = chain->ms.has_children;
+               }
+
+               if (has_children)
+                       n += callchain_node__set_folding_rb_tree(child, unfold);
+       }
+
+       return n;
+}
+
+static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
+{
+       struct callchain_list *chain;
+       bool has_children = false;
+       int n = 0;
+
+       list_for_each_entry(chain, &node->val, list) {
+               ++n;
+               map_symbol__set_folding(&chain->ms, unfold);
+               has_children = chain->ms.has_children;
+       }
+
+       if (has_children)
+               n += callchain_node__set_folding_rb_tree(node, unfold);
+
+       return n;
+}
+
+static int callchain__set_folding(struct rb_root *chain, bool unfold)
+{
+       struct rb_node *nd;
+       int n = 0;
+
+       for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+               struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+               n += callchain_node__set_folding(node, unfold);
+       }
+
+       return n;
+}
+
+static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+{
+       hist_entry__init_have_children(self);
+       map_symbol__set_folding(&self->ms, unfold);
+
+       if (self->ms.has_children) {
+               int n = callchain__set_folding(&self->sorted_chain, unfold);
+               self->nr_rows = unfold ? n : 0;
+       } else
+               self->nr_rows = 0;
+}
+
+static void hists__set_folding(struct hists *self, bool unfold)
+{
+       struct rb_node *nd;
+
+       self->nr_entries = 0;
+
+       for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+               hist_entry__set_folding(he, unfold);
+               self->nr_entries += 1 + he->nr_rows;
+       }
+}
+
+static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+{
+       hists__set_folding(self->hists, unfold);
+       self->b.nr_entries = self->hists->nr_entries;
+       /* Go to the start, we may be way after valid entries after a collapse */
+       ui_browser__reset_index(&self->b);
+}
+
+static int hist_browser__run(struct hist_browser *self, const char *title)
 {
-       char str[256], unit;
-       unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE];
+       int key;
+       int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
+                           NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
 
        self->b.entries = &self->hists->entries;
        self->b.nr_entries = self->hists->nr_entries;
 
        hist_browser__refresh_dimensions(self);
 
-       nr_events = convert_unit(nr_events, &unit);
-       snprintf(str, sizeof(str), "Events: %lu%c                            ",
-                nr_events, unit);
-       newtDrawRootText(0, 0, str);
-
        if (ui_browser__show(&self->b, title,
                             "Press '?' for help on key bindings") < 0)
                return -1;
 
-       newtFormAddHotKey(self->b.form, 'a');
-       newtFormAddHotKey(self->b.form, '?');
-       newtFormAddHotKey(self->b.form, 'h');
-       newtFormAddHotKey(self->b.form, 'd');
-       newtFormAddHotKey(self->b.form, 'D');
-       newtFormAddHotKey(self->b.form, 't');
-
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
+       ui_browser__add_exit_keys(&self->b, exit_keys);
 
        while (1) {
-               ui_browser__run(&self->b, es);
+               key = ui_browser__run(&self->b);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
-               switch (es->u.key) {
+               switch (key) {
                case 'D': { /* Debug */
                        static int seq;
                        struct hist_entry *h = rb_entry(self->b.top,
@@ -245,18 +322,26 @@ static int hist_browser__run(struct hist_browser *self, const char *title,
                                           self->b.top_idx,
                                           h->row_offset, h->nr_rows);
                }
-                       continue;
+                       break;
+               case 'C':
+                       /* Collapse the whole world. */
+                       hist_browser__set_folding(self, false);
+                       break;
+               case 'E':
+                       /* Expand the whole world. */
+                       hist_browser__set_folding(self, true);
+                       break;
                case NEWT_KEY_ENTER:
                        if (hist_browser__toggle_fold(self))
                                break;
                        /* fall thru */
                default:
-                       return 0;
+                       goto out;
                }
        }
-
+out:
        ui_browser__hide(&self->b);
-       return 0;
+       return key;
 }
 
 static char *callchain_list__sym_name(struct callchain_list *self,
@@ -306,15 +391,10 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                        int color;
                        bool was_first = first;
 
-                       if (first) {
+                       if (first)
                                first = false;
-                               chain->ms.has_children = chain->list.next != &child->val ||
-                                                        rb_first(&child->rb_root) != NULL;
-                       } else {
+                       else
                                extra_offset = LEVEL_OFFSET_STEP;
-                               chain->ms.has_children = chain->list.next == &child->val &&
-                                                        rb_first(&child->rb_root) != NULL;
-                       }
 
                        folded_sign = callchain_list__folded(chain);
                        if (*row_offset != 0) {
@@ -341,8 +421,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
                                *is_current_entry = true;
                        }
 
-                       SLsmg_set_color(color);
-                       SLsmg_gotorc(self->b.y + row, self->b.x);
+                       ui_browser__set_color(&self->b, color);
+                       ui_browser__gotorc(&self->b, row, 0);
                        slsmg_write_nstring(" ", offset + extra_offset);
                        slsmg_printf("%c ", folded_sign);
                        slsmg_write_nstring(str, width);
@@ -384,12 +464,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
        list_for_each_entry(chain, &node->val, list) {
                char ipstr[BITS_PER_LONG / 4 + 1], *s;
                int color;
-               /*
-                * FIXME: This should be moved to somewhere else,
-                * probably when the callchain is created, so as not to
-                * traverse it all over again
-                */
-               chain->ms.has_children = rb_first(&node->rb_root) != NULL;
+
                folded_sign = callchain_list__folded(chain);
 
                if (*row_offset != 0) {
@@ -405,8 +480,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
                }
 
                s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
-               SLsmg_gotorc(self->b.y + row, self->b.x);
-               SLsmg_set_color(color);
+               ui_browser__gotorc(&self->b, row, 0);
+               ui_browser__set_color(&self->b, color);
                slsmg_write_nstring(" ", offset);
                slsmg_printf("%c ", folded_sign);
                slsmg_write_nstring(s, width - 2);
@@ -465,7 +540,7 @@ static int hist_browser__show_entry(struct hist_browser *self,
        }
 
        if (symbol_conf.use_callchain) {
-               entry->ms.has_children = !RB_EMPTY_ROOT(&entry->sorted_chain);
+               hist_entry__init_have_children(entry);
                folded_sign = hist_entry__folded(entry);
        }
 
@@ -484,8 +559,8 @@ static int hist_browser__show_entry(struct hist_browser *self,
                                color = HE_COLORSET_NORMAL;
                }
 
-               SLsmg_set_color(color);
-               SLsmg_gotorc(self->b.y + row, self->b.x);
+               ui_browser__set_color(&self->b, color);
+               ui_browser__gotorc(&self->b, row, 0);
                if (symbol_conf.use_callchain) {
                        slsmg_printf("%c ", folded_sign);
                        width -= 2;
@@ -687,8 +762,6 @@ static struct hist_browser *hist_browser__new(struct hists *hists)
 
 static void hist_browser__delete(struct hist_browser *self)
 {
-       newtFormDestroy(self->b.form);
-       newtPopWindow();
        free(self);
 }
 
@@ -702,21 +775,26 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *self)
        return self->he_selection->thread;
 }
 
-static int hist_browser__title(char *bf, size_t size, const char *ev_name,
-                              const struct dso *dso, const struct thread *thread)
+static int hists__browser_title(struct hists *self, char *bf, size_t size,
+                               const char *ev_name, const struct dso *dso,
+                               const struct thread *thread)
 {
-       int printed = 0;
+       char unit;
+       int printed;
+       unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+       nr_events = convert_unit(nr_events, &unit);
+       printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
 
        if (thread)
                printed += snprintf(bf + printed, size - printed,
-                                   "Thread: %s(%d)",
-                                   (thread->comm_set ?  thread->comm : ""),
+                                   ", Thread: %s(%d)",
+                                   (thread->comm_set ? thread->comm : ""),
                                    thread->pid);
        if (dso)
                printed += snprintf(bf + printed, size - printed,
-                                   "%sDSO: %s", thread ? " " : "",
-                                   dso->short_name);
-       return printed ?: snprintf(bf, size, "Event: %s", ev_name);
+                                   ", DSO: %s", dso->short_name);
+       return printed;
 }
 
 int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
@@ -725,7 +803,6 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
        struct pstack *fstack;
        const struct thread *thread_filter = NULL;
        const struct dso *dso_filter = NULL;
-       struct newtExitStruct es;
        char msg[160];
        int key = -1;
 
@@ -738,9 +815,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
 
        ui_helpline__push(helpline);
 
-       hist_browser__title(msg, sizeof(msg), ev_name,
-                           dso_filter, thread_filter);
-
+       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                            dso_filter, thread_filter);
        while (1) {
                const struct thread *thread;
                const struct dso *dso;
@@ -749,70 +825,63 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
                    annotate = -2, zoom_dso = -2, zoom_thread = -2,
                    browse_map = -2;
 
-               if (hist_browser__run(browser, msg, &es))
-                       break;
+               key = hist_browser__run(browser, msg);
 
                thread = hist_browser__selected_thread(browser);
                dso = browser->selection->map ? browser->selection->map->dso : NULL;
 
-               if (es.reason == NEWT_EXIT_HOTKEY) {
-                       key = es.u.key;
-
-                       switch (key) {
-                       case NEWT_KEY_F1:
-                               goto do_help;
-                       case NEWT_KEY_TAB:
-                       case NEWT_KEY_UNTAB:
-                               /*
-                                * Exit the browser, let hists__browser_tree
-                                * go to the next or previous
-                                */
-                               goto out_free_stack;
-                       default:;
-                       }
-
-                       switch (key) {
-                       case 'a':
-                               if (browser->selection->map == NULL &&
-                                   browser->selection->map->dso->annotate_warned)
-                                       continue;
-                               goto do_annotate;
-                       case 'd':
-                               goto zoom_dso;
-                       case 't':
-                               goto zoom_thread;
-                       case 'h':
-                       case '?':
-do_help:
-                               ui__help_window("->        Zoom into DSO/Threads & Annotate current symbol\n"
-                                               "<-        Zoom out\n"
-                                               "a         Annotate current symbol\n"
-                                               "h/?/F1    Show this window\n"
-                                               "d         Zoom into current DSO\n"
-                                               "t         Zoom into current Thread\n"
-                                               "q/CTRL+C  Exit browser");
+               switch (key) {
+               case NEWT_KEY_TAB:
+               case NEWT_KEY_UNTAB:
+                       /*
+                        * Exit the browser, let hists__browser_tree
+                        * go to the next or previous
+                        */
+                       goto out_free_stack;
+               case 'a':
+               if (browser->selection->map == NULL ||
+                   browser->selection->map->dso->annotate_warned)
                                continue;
-                       default:;
-                       }
-                       if (is_exit_key(key)) {
-                               if (key == NEWT_KEY_ESCAPE &&
-                                   !ui__dialog_yesno("Do you really want to exit?"))
-                                       continue;
-                               break;
-                       }
-
-                       if (es.u.key == NEWT_KEY_LEFT) {
-                               const void *top;
+                       goto do_annotate;
+               case 'd':
+                       goto zoom_dso;
+               case 't':
+                       goto zoom_thread;
+               case NEWT_KEY_F1:
+               case 'h':
+               case '?':
+                       ui__help_window("->        Zoom into DSO/Threads & Annotate current symbol\n"
+                                       "<-        Zoom out\n"
+                                       "a         Annotate current symbol\n"
+                                       "h/?/F1    Show this window\n"
+                                       "C         Collapse all callchains\n"
+                                       "E         Expand all callchains\n"
+                                       "d         Zoom into current DSO\n"
+                                       "t         Zoom into current Thread\n"
+                                       "q/CTRL+C  Exit browser");
+                       continue;
+               case NEWT_KEY_ENTER:
+               case NEWT_KEY_RIGHT:
+                       /* menu */
+                       break;
+               case NEWT_KEY_LEFT: {
+                       const void *top;
 
-                               if (pstack__empty(fstack))
-                                       continue;
-                               top = pstack__pop(fstack);
-                               if (top == &dso_filter)
-                                       goto zoom_out_dso;
-                               if (top == &thread_filter)
-                                       goto zoom_out_thread;
+                       if (pstack__empty(fstack))
                                continue;
-                       }
+                       top = pstack__pop(fstack);
+                       if (top == &dso_filter)
+                               goto zoom_out_dso;
+                       if (top == &thread_filter)
+                               goto zoom_out_thread;
+                       continue;
+               }
+               case NEWT_KEY_ESCAPE:
+                       if (!ui__dialog_yesno("Do you really want to exit?"))
+                               continue;
+                       /* Fall thru */
+               default:
+                       goto out_free_stack;
                }
 
                if (browser->selection->sym != NULL &&
@@ -885,8 +954,8 @@ zoom_out_dso:
                                pstack__push(fstack, &dso_filter);
                        }
                        hists__filter_by_dso(self, dso_filter);
-                       hist_browser__title(msg, sizeof(msg), ev_name,
-                                           dso_filter, thread_filter);
+                       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                                            dso_filter, thread_filter);
                        hist_browser__reset(browser);
                } else if (choice == zoom_thread) {
 zoom_thread:
@@ -903,8 +972,8 @@ zoom_out_thread:
                                pstack__push(fstack, &thread_filter);
                        }
                        hists__filter_by_thread(self, thread_filter);
-                       hist_browser__title(msg, sizeof(msg), ev_name,
-                                           dso_filter, thread_filter);
+                       hists__browser_title(self, msg, sizeof(msg), ev_name,
+                                            dso_filter, thread_filter);
                        hist_browser__reset(browser);
                }
        }
@@ -925,10 +994,6 @@ int hists__tui_browse_tree(struct rb_root *self, const char *help)
                const char *ev_name = __event_name(hists->type, hists->config);
 
                key = hists__browse(hists, help, ev_name);
-
-               if (is_exit_key(key))
-                       break;
-
                switch (key) {
                case NEWT_KEY_TAB:
                        next = rb_next(nd);
@@ -940,7 +1005,7 @@ int hists__tui_browse_tree(struct rb_root *self, const char *help)
                                continue;
                        nd = rb_prev(nd);
                default:
-                       break;
+                       return key;
                }
        }
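
Editor's note: the 'C'/'E' handlers rely on the recursive walk added above: every map_symbol gets its folding state set, and the row counts bubble up so the browser knows how many entries are now visible. A simplified model of the same recursion over a plain n-ary tree (hypothetical structure, not the callchain types):

#include <stdbool.h>
#include <stddef.h>

struct cnode {
        bool unfolded;
        bool has_children;
        struct cnode *child;            /* first child */
        struct cnode *sibling;          /* next sibling */
};

/* Set the folding state on the whole subtree and return how many
 * rows are visible below this node. */
static int cnode__set_folding(struct cnode *self, bool unfold)
{
        int rows = 0;

        self->unfolded = unfold && self->has_children;
        for (struct cnode *c = self->child; c; c = c->sibling) {
                int below = cnode__set_folding(c, unfold);

                if (self->unfolded)
                        rows += 1 + below;      /* the child plus its visible rows */
        }
        return rows;
}

int main(void)
{
        struct cnode leaf = { false, false, NULL, NULL };
        struct cnode root = { false, true, &leaf, NULL };
        int visible = cnode__set_folding(&root, true);

        return visible == 1 ? 0 : 1;    /* the leaf becomes visible */
}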
 
index 142b825b42bf41d90ee013f51aaac5e79df44f5e..e35437dfa5b48aea8a0fb0237b2bf7ed7aaf90cd 100644 (file)
@@ -1,6 +1,5 @@
 #include "../libslang.h"
 #include <elf.h>
-#include <newt.h>
 #include <sys/ttydefaults.h>
 #include <ctype.h>
 #include <string.h>
@@ -47,7 +46,6 @@ out_free_form:
 struct map_browser {
        struct ui_browser b;
        struct map        *map;
-       u16               namelen;
        u8                addrlen;
 };
 
@@ -56,14 +54,16 @@ static void map_browser__write(struct ui_browser *self, void *nd, int row)
        struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
        struct map_browser *mb = container_of(self, struct map_browser, b);
        bool current_entry = ui_browser__is_current_entry(self, row);
-       int color = ui_browser__percent_color(0, current_entry);
+       int width;
 
-       SLsmg_set_color(color);
+       ui_browser__set_percent_color(self, 0, current_entry);
        slsmg_printf("%*llx %*llx %c ",
                     mb->addrlen, sym->start, mb->addrlen, sym->end,
                     sym->binding == STB_GLOBAL ? 'g' :
                     sym->binding == STB_LOCAL  ? 'l' : 'w');
-       slsmg_write_nstring(sym->name, mb->namelen);
+       width = self->width - ((mb->addrlen * 2) + 4);
+       if (width > 0)
+               slsmg_write_nstring(sym->name, width);
 }
 
 /* FIXME uber-kludgy, see comment on cmd_report... */
@@ -98,31 +98,29 @@ static int map_browser__search(struct map_browser *self)
        return 0;
 }
 
-static int map_browser__run(struct map_browser *self, struct newtExitStruct *es)
+static int map_browser__run(struct map_browser *self)
 {
+       int key;
+
        if (ui_browser__show(&self->b, self->map->dso->long_name,
                             "Press <- or ESC to exit, %s / to search",
                             verbose ? "" : "restart with -v to use") < 0)
                return -1;
 
-       newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
-       newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
        if (verbose)
-               newtFormAddHotKey(self->b.form, '/');
+               ui_browser__add_exit_key(&self->b, '/');
 
        while (1) {
-               ui_browser__run(&self->b, es);
+               key = ui_browser__run(&self->b);
 
-               if (es->reason != NEWT_EXIT_HOTKEY)
-                       break;
-               if (verbose && es->u.key == '/')
+               if (verbose && key == '/')
                        map_browser__search(self);
                else
                        break;
        }
 
        ui_browser__hide(&self->b);
-       return 0;
+       return key;
 }
 
 int map__browse(struct map *self)
@@ -136,7 +134,6 @@ int map__browse(struct map *self)
                },
                .map = self,
        };
-       struct newtExitStruct es;
        struct rb_node *nd;
        char tmp[BITS_PER_LONG / 4];
        u64 maxaddr = 0;
@@ -144,8 +141,6 @@ int map__browse(struct map *self)
        for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
 
-               if (mb.namelen < pos->namelen)
-                       mb.namelen = pos->namelen;
                if (maxaddr < pos->end)
                        maxaddr = pos->end;
                if (verbose) {
@@ -156,6 +151,5 @@ int map__browse(struct map *self)
        }
 
        mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
-       mb.b.width += mb.addrlen * 2 + 4 + mb.namelen;
-       return map_browser__run(&mb, &es);
+       return map_browser__run(&mb);
 }
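
Editor's note: dropping namelen works because the symbol column now just takes whatever width is left, while the address columns keep using the snprintf trick above: format the largest address once and reuse its length as the field width. In isolation:

#include <stdio.h>

int main(void)
{
        unsigned long long maxaddr = 0xffffffff81000000ULL;
        char tmp[sizeof(maxaddr) * 2 + 1];      /* BITS_PER_LONG / 4 digits + NUL */
        int addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);

        /* every address now prints in a column sized for the widest one */
        printf("%*llx %*llx\n", addrlen, 0x1000ULL, addrlen, maxaddr);
        return 0;
}
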
index 04600e26ceea21d08b701570494c5e72210aaa9e..9706d9d40279859321412b270c8ac3053141f4f9 100644 (file)
@@ -11,8 +11,6 @@
 #include "helpline.h"
 #include "util.h"
 
-newtComponent newt_form__new(void);
-
 static void newt_form__set_exit_keys(newtComponent self)
 {
        newtFormAddHotKey(self, NEWT_KEY_LEFT);
@@ -22,7 +20,7 @@ static void newt_form__set_exit_keys(newtComponent self)
        newtFormAddHotKey(self, CTRL('c'));
 }
 
-newtComponent newt_form__new(void)
+static newtComponent newt_form__new(void)
 {
        newtComponent self = newtForm(NULL, NULL, 0);
        if (self)
index f380fed74359034a843756256d6e7a79b0ff22b5..7562707ddd1c491755dc8ea5121637918ba1b844 100644 (file)
@@ -266,19 +266,6 @@ bool strglobmatch(const char *str, const char *pat);
 bool strlazymatch(const char *str, const char *pat);
 unsigned long convert_unit(unsigned long value, char *unit);
 
-#ifndef ESC
-#define ESC 27
-#endif
-
-static inline bool is_exit_key(int key)
-{
-       char up;
-       if (key == CTRL('c') || key == ESC)
-               return true;
-       up = toupper(key);
-       return up == 'Q';
-}
-
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
index 66cf65b510b11c1c245db3b6a435e040ac3606fc..c1f1e3c6298462f8ed4d672257dac3b4da081b33 100644 (file)
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        events = file->f_op->poll(file, &irqfd->pt);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
-       spin_unlock_irq(&kvm->irqfds.lock);
 
        /*
         * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        if (events & POLLIN)
                schedule_work(&irqfd->inject);
 
+       spin_unlock_irq(&kvm->irqfds.lock);
+
        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
         * we might race against the POLLHUP
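
Editor's note: the eventfd hunk is a race fix with a simple shape: the pending-event check and the resulting schedule_work() now both happen before irqfds.lock is dropped, so a concurrent path that takes the same lock cannot slip in between observation and action. A loose userspace analogy of that discipline, with pthread stand-ins and hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool pending;
static int dispatched;

static void dispatch(void)              /* stand-in for schedule_work() */
{
        dispatched++;
}

void assign(bool event_already_pending)
{
        pthread_mutex_lock(&lock);
        pending = event_already_pending;
        if (pending)
                dispatch();             /* still under the lock, as in the fix */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        assign(true);
        return dispatched == 1 ? 0 : 1;
}
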
index b78b794c1039df394b6ccfc43ae34fb2ff6fc47a..5186e728c53ed7e27ba6d32df31d37f7aa140ebc 100644 (file)
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                       cpu);
                hardware_disable(NULL);
                break;
-       case CPU_ONLINE:
+       case CPU_STARTING:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_enable, NULL, 1);
+               hardware_enable(NULL);
                break;
        }
        return NOTIFY_OK;
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 
 asmlinkage void kvm_handle_fault_on_reboot(void)
 {
-       if (kvm_rebooting)
+       if (kvm_rebooting) {
                /* spin while reset goes on */
+               local_irq_enable();
                while (true)
                        ;
+       }
        /* Fault while not rebooting.  We want the trace. */
        BUG();
 }
@@ -2096,7 +2098,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
-       .priority = 20, /* must be > scheduler priority */
 };
 
 static int vm_stat_get(void *_offset, u64 *val)
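
Editor's note: the switch from CPU_ONLINE to CPU_STARTING above matters because CPU_STARTING notifiers run on the CPU that is coming up, so hardware_enable() can be called directly instead of being bounced through smp_call_function_single(); that is also why the notifier-priority workaround is deleted at the end. A schematic of the dispatch, with stubbed callbacks and simplified types:

#include <stdio.h>

enum cpu_event { CPU_DYING, CPU_STARTING };

static void hardware_enable(void *junk)        /* stub */
{
        (void)junk;
        puts("virtualization enabled on this CPU");
}

static int cpu_hotplug(enum cpu_event val, int cpu)
{
        switch (val) {
        case CPU_STARTING:
                /* we are already running on 'cpu', so no cross-call is needed */
                printf("kvm: enabling virtualization on CPU%d\n", cpu);
                hardware_enable(NULL);
                break;
        default:
                break;
        }
        return 0;
}

int main(void)
{
        return cpu_hotplug(CPU_STARTING, 0);
}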