git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
author     David S. Miller <davem@davemloft.net>
Thu, 7 Oct 2010 02:11:17 +0000 (19:11 -0700)
committer  David S. Miller <davem@davemloft.net>
Thu, 7 Oct 2010 02:11:17 +0000 (19:11 -0700)
723 files changed:
CREDITS
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/kernel-api.tmpl
Documentation/DocBook/kernel-locking.tmpl
Documentation/block/cfq-iosched.txt [new file with mode: 0644]
Documentation/cgroups/blkio-controller.txt
Documentation/gpio.txt
Documentation/hwmon/sysfs-interface
Documentation/kernel-doc-nano-HOWTO.txt
Documentation/mutex-design.txt
Documentation/power/regulator/overview.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/workqueue.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/cacheflush.h
arch/alpha/include/asm/unistd.h
arch/alpha/kernel/entry.S
arch/alpha/kernel/err_ev6.c
arch/alpha/kernel/err_marvel.c
arch/alpha/kernel/err_titan.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/pci-sysfs.c
arch/alpha/kernel/process.c
arch/alpha/kernel/signal.c
arch/alpha/kernel/srm_env.c
arch/alpha/kernel/systbls.S
arch/alpha/kernel/time.c
arch/alpha/kernel/traps.c
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/clock.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-dove/include/mach/io.h
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/include/mach/hardware.h
arch/arm/mach-kirkwood/include/mach/kirkwood.h
arch/arm/mach-kirkwood/pcie.c
arch/arm/mach-mmp/include/mach/system.h
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/hardware.h
arch/arm/mach-pxa/include/mach/io.h
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/mach-real6410.c
arch/arm/mach-s5pv210/clock.c
arch/arm/mach-s5pv210/cpu.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mach-u300/include/mach/gpio.h
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mm/Kconfig
arch/arm/mm/alignment.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm/oprofile/common.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-nomadik/timer.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/sram.c
arch/arm/plat-pxa/pwm.c
arch/arm/plat-s5p/dev-fimc0.c
arch/arm/plat-s5p/dev-fimc1.c
arch/arm/plat-s5p/dev-fimc2.c
arch/arm/plat-samsung/gpio-config.c
arch/arm/plat-samsung/include/plat/gpio-cfg.h
arch/arm/tools/mach-types
arch/avr32/kernel/module.c
arch/frv/kernel/signal.c
arch/h8300/kernel/module.c
arch/ia64/include/asm/compat.h
arch/ia64/kernel/fsys.S
arch/m32r/include/asm/signal.h
arch/m32r/include/asm/unistd.h
arch/m32r/kernel/entry.S
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/signal.c
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/entry.S
arch/m68k/mac/macboing.c
arch/m68knommu/kernel/syscalltable.S
arch/mips/Kconfig
arch/mips/alchemy/common/prom.c
arch/mips/boot/compressed/Makefile
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/cpu.c
arch/mips/cavium-octeon/executive/Makefile
arch/mips/include/asm/atomic.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/cop2.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/mach-tx49xx/kmalloc.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/page.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/kspd.c
arch/mips/kernel/linux32.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/mm/dma-default.c
arch/mips/mm/sc-rm7k.c
arch/mips/mti-malta/malta-int.c
arch/mips/pci/pci-rc32434.c
arch/mips/pnx8550/common/reset.c
arch/mips/pnx8550/common/setup.c
arch/mn10300/Kconfig
arch/mn10300/Kconfig.debug
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/signal.h
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/module.c
arch/mn10300/kernel/signal.c
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-disabled.c [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/parisc/include/asm/compat.h
arch/parisc/kernel/module.c
arch/powerpc/include/asm/compat.h
arch/powerpc/include/asm/fsldma.h
arch/powerpc/kernel/module.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/platforms/512x/clock.c
arch/powerpc/platforms/52xx/efika.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/s390/include/asm/compat.h
arch/s390/kernel/module.c
arch/sh/kernel/module.c
arch/sparc/include/asm/compat.h
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/windows.c
arch/tile/include/arch/chip_tile64.h
arch/tile/include/arch/chip_tilepro.h
arch/tile/include/asm/compat.h
arch/tile/include/asm/io.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/ptrace.h
arch/tile/include/asm/sigcontext.h
arch/tile/include/asm/signal.h
arch/tile/include/asm/syscalls.h
arch/tile/kernel/intvec_32.S
arch/tile/kernel/process.c
arch/tile/kernel/signal.c
arch/tile/kernel/stack.c
arch/um/kernel/exec.c
arch/um/kernel/internal.h
arch/um/kernel/syscall.c
arch/x86/Makefile
arch/x86/boot/early_serial_console.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/amd_iommu_proto.h
arch/x86/include/asm/amd_iommu_types.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hw_breakpoint.h
arch/x86/include/asm/iomap.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/hpet.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/module.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/irq.h
arch/x86/lguest/boot.c
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
arch/x86/xen/time.c
block/blk-cgroup.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/apei/Kconfig
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst-dbg.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/atomicio.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/fan.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_platform.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-sff.c
drivers/ata/pata_artop.c
drivers/ata/pata_via.c
drivers/ata/sata_mv.c
drivers/base/power/main.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/pktcdvd.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/char/virtio_console.c
drivers/char/vt_ioctl.c
drivers/cpuidle/governors/menu.c
drivers/dma/mv_xor.c
drivers/dma/shdma.c
drivers/edac/edac_mc.c
drivers/edac/i7core_edac.c
drivers/firewire/ohci.c
drivers/gpio/sx150x.c
drivers/gpu/drm/drm_buffer.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_blit_shaders.h
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/vga/vgaarb.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-mosart.c
drivers/hid/hid-topseed.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hid/usbhid/usbhid.h
drivers/hwmon/Kconfig
drivers/hwmon/adm1031.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc1403.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/hp_accel.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/lis3lv02d_i2c.c
drivers/hwmon/lis3lv02d_spi.c
drivers/hwmon/lm95241.c
drivers/hwmon/pkgtemp.c
drivers/hwmon/w83627ehf.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/ide/ide-probe.c
drivers/idle/intel_idle.c [changed mode: 0755->0644]
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/input.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_wac.c
drivers/leds/leds-ns2.c
drivers/md/md.c
drivers/mfd/max8925-core.c
drivers/mfd/wm831x-irq.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/vmw_balloon.c [moved from drivers/misc/vmware_balloon.c with 100% similarity]
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/onenand/samsung.c
drivers/net/ll_temac_main.c
drivers/net/ll_temac_mdio.c
drivers/net/pcmcia/pcnet_cs.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/pci/intel-iommu.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/pd6729.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/apm_power.c
drivers/power/intel_mid_battery.c
drivers/regulator/88pm8607.c
drivers/regulator/ab3100.c
drivers/regulator/ab8500.c
drivers/regulator/ad5398.c
drivers/regulator/core.c
drivers/regulator/isl6271a-regulator.c
drivers/regulator/max1586.c
drivers/regulator/max8649.c
drivers/regulator/max8998.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/rtc/rtc-ab3100.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-s3c.c
drivers/s390/char/tape_block.c
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/constants.c
drivers/scsi/hpsa.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/serial/amba-pl010.c
drivers/serial/mfd.c
drivers/serial/mpc52xx_uart.c
drivers/serial/mrst_max3110.c
drivers/serial/serial_cs.c
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/spi.c
drivers/spi/spi_gpio.c
drivers/spi/spi_mpc8xxx.c
drivers/spi/spi_s3c64xx.c
drivers/staging/batman-adv/hard-interface.c
drivers/staging/batman-adv/send.c
drivers/staging/ti-st/st.h
drivers/staging/ti-st/st_core.c
drivers/staging/ti-st/st_core.h
drivers/staging/ti-st/st_kim.c
drivers/staging/vt6655/wpactl.c
drivers/usb/core/Kconfig
drivers/usb/core/file.c
drivers/usb/core/message.c
drivers/usb/host/ehci-pci.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_debugfs.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget.h
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/usb/otg/twl4030-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/video/console/fbcon.c
drivers/video/efifb.c
drivers/video/pxa168fb.c
drivers/video/sis/sis_main.c
drivers/video/via/ioctl.c
drivers/watchdog/Kconfig
drivers/watchdog/sb_wdog.c
drivers/watchdog/ts72xx_wdt.c
drivers/xen/xenbus/xenbus_probe.c
fs/9p/vfs_dir.c
fs/9p/vfs_inode.c
fs/9p/vfs_super.c
fs/aio.c
fs/binfmt_misc.c
fs/bio-integrity.c
fs/ceph/Kconfig
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/pagelist.c
fs/ceph/snap.c
fs/ceph/super.h
fs/char_dev.c
fs/cifs/Kconfig
fs/cifs/asn1.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/transport.c
fs/coda/psdev.c
fs/compat.c
fs/direct-io.c
fs/exec.c
fs/fcntl.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/log.c
fs/minix/namei.c
fs/nfs/Kconfig
fs/nfs/client.c
fs/nfs/file.c
fs/nfs/super.c
fs/nfsd/Kconfig
fs/nfsd/nfs4state.c
fs/ocfs2/acl.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlmglue.h
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/ocfs2_ioctl.h
fs/ocfs2/refcounttree.c
fs/ocfs2/reservations.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/ocfs2/symlink.c
fs/ocfs2/xattr.c
fs/proc/base.c
fs/proc/page.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/reiserfs/ioctl.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/acpi/acpixf.h
include/asm-generic/gpio.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_pciids.h
include/linux/cgroup.h
include/linux/compat.h
include/linux/cpuidle.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/elevator.h
include/linux/fs.h
include/linux/gpio.h
include/linux/i2c/sx150x.h
include/linux/io-mapping.h
include/linux/kfifo.h
include/linux/ksm.h
include/linux/lglock.h
include/linux/libata.h
include/linux/mm.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/module.h
include/linux/mutex.h
include/linux/pci_ids.h
include/linux/quotaops.h
include/linux/rcupdate.h
include/linux/semaphore.h
include/linux/spi/dw_spi.h
include/linux/sunrpc/clnt.h
include/linux/swap.h
include/linux/vmstat.h
include/linux/wait.h
include/linux/workqueue.h
ipc/sem.c
kernel/cgroup.c
kernel/compat.c
kernel/debug/kdb/kdb_bp.c
kernel/fork.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/hw_breakpoint.c
kernel/kfifo.c
kernel/module.c
kernel/mutex.c
kernel/perf_event.c
kernel/pm_qos_params.c
kernel/power/hibernate.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/sched.c
kernel/sched_fair.c
kernel/smp.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/watchdog.c
kernel/workqueue.c
lib/bug.c
lib/list_sort.c
lib/scatterlist.c
mm/Kconfig
mm/backing-dev.c
mm/bounce.c
mm/compaction.c
mm/fremap.c
mm/hugetlb.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
mm/mmzone.c
mm/oom_kill.c
mm/page_alloc.c
mm/percpu.c
mm/rmap.c
mm/swapfile.c
mm/vmscan.c
mm/vmstat.c
net/9p/client.c
net/9p/trans_rdma.c
net/sunrpc/auth.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_spkm3_mech.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
samples/kfifo/dma-example.c
scripts/basic/docproc.c
scripts/kernel-doc
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
security/keys/keyctl.c
security/tomoyo/common.c
security/tomoyo/common.h
sound/core/control.c
sound/core/pcm.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/i2c/other/ak4xxx-adda.c
sound/isa/msnd/msnd_pinnacle.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_wm87x6.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/ppc/snd_ps3.c
sound/soc/s3c24xx/s3c-dma.c
sound/soc/sh/migor.c
sound/soc/soc-cache.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/pcm.c
tools/perf/Makefile
tools/perf/util/callchain.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/trace-event-scripting.c
tools/perf/util/ui/browsers/hists.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 72b487869788c14cd40e9535b700f4be166aa12b..41d8e63d5165b5b786db6ab7d8c14fbc49fc0107 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
 D: Small contributions to ncpfs
 D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
 
 N: Thibaut Varene
 E: T-Bone@parisc-linux.org
index ecd35e9d4410a2b8c28241331f4f96f5197b6c3a..feca0758391e145a8fb76e5a7059ce7f057e79ab 100644 (file)
@@ -46,7 +46,6 @@
 
      <sect1><title>Atomic and pointer manipulation</title>
 !Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
      </sect1>
 
      <sect1><title>Delaying, scheduling, and timer routines</title>
index a20c6f6fffc32aabb214c24ff9f53d477a265f69..6899f471fb152ebe7b405bcf48e4ff0b3f3aef93 100644 (file)
@@ -57,7 +57,6 @@
      </para>
 
      <sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
 !Elib/vsprintf.c
      </sect1>
      <sect1><title>String Manipulation</title>
index 0b1a3f97f285361a4075c8e267d42b2053747d9a..a0d479d1e1dd872bd1ae7b4d17d4582df03a384c 100644 (file)
@@ -1961,6 +1961,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
 
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644 (file)
index 0000000..e578fee
--- /dev/null
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for the next request on certain cfq queues
+(for sequential workloads) and service trees (for random workloads) before
+the queue is expired and CFQ selects the next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single spindle SATA/SAS disks where we can cut down on overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all the idling on queues/service tree
+level and one should see an overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in hardware RAID configuration. The down
+side is that isolation provided from WRITES also goes down and notion of
+IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
+keeping slice_idle enabled should be useful. For any configurations where
+there are multiple spindles behind single LUN (Host based hardware RAID
+controller or for storage arrays), setting slice_idle=0 might end up in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+Basic CFQ design is to provide priority based time slices. Higher priority
+process gets bigger time slice and lower priority process gets smaller time
+slice. Measuring time becomes harder if storage is fast and supports NCQ and
+it would be better to dispatch multiple requests from multiple cfq queues in
+the request queue at a time. In such a scenario, it is not possible to measure the time
+consumed by a single queue accurately.
+
+What is possible though is to measure number of requests dispatched from a
+single queue and also allow dispatch from multiple cfq queues at the same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
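
The slice_idle tunable documented above is set through the per-disk CFQ sysfs directory. Below is a minimal userspace sketch of that knob (not part of the patch; the disk name "sda" is an assumption for illustration):

/* Switch one CFQ-managed disk to IOPS-style behaviour by writing 0 to
 * slice_idle.  "sda" is an assumed device name; adjust as needed. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/slice_idle";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");	/* not using CFQ, or no such disk */
		return 1;
	}
	fprintf(f, "0\n");		/* 0 disables idling on queues/service trees */
	fclose(f);
	return 0;
}
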
index 48e0b21b00594dac1971472bef9105e04d2bfa77..6919d62591d97580d3132f539e0edd50d9985449 100644 (file)
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On faster hardware CFQ can be slow, especially with sequential workloads.
+This happens because CFQ idles on a single queue and single queue might not
+drive deeper request queue depths to keep the storage busy. In such scenarios
+one can try setting slice_idle=0 and that would switch CFQ to IOPS
+(IO operations per second) mode on NCQ supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence be
+able to drive higher queue depths and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is the same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+You can experience an overall throughput drop if you have created multiple
+groups and put applications in those groups which are not driving enough
+IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are supported. All the buffered writes are
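
The group_idle knob described above sits alongside slice_idle in the same iosched directory. A small sketch, again assuming a disk named "sda", that reports the current value and then disables per-group idling:

/* Read the current group_idle value for one disk and then clear it.
 * "sda" is an assumed device name. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/group_idle";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("current group_idle: %s", buf);
		fclose(f);
	}

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "0\n");	/* stop idling on individual groups */
	fclose(f);
	return 0;
}
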
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test if such a number from such a structure could reference a GPIO, you
+may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime.  Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.)
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
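
The gpio_is_valid() predicate discussed in the gpio.txt hunks above is normally checked before requesting a line. A brief kernel-side sketch of that pattern using the gpiolib calls of this kernel series; the GPIO number source and the "status-led" label are assumptions:

/* Validate a GPIO number (e.g. taken from platform data, possibly left
 * as -EINVAL), then request and configure it.  Illustration only. */
#include <linux/gpio.h>
#include <linux/errno.h>

static int example_claim_led(int gpio)
{
	int err;

	if (!gpio_is_valid(gpio))		/* e.g. board left it as -EINVAL */
		return -ENODEV;

	err = gpio_request(gpio, "status-led");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 0);	/* start with the line low */
	if (err)
		gpio_free(gpio);
	return err;
}
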
index ff45d1f837c89ab6706726ae525083e22d05af16..48ceabedf55df87dddff2d62864f6f93db9d315b 100644 (file)
@@ -91,12 +91,11 @@ name                The chip name.
                I2C devices get this attribute created automatically.
                RO
 
-update_rate    The rate at which the chip will update readings.
+update_interval        The interval at which the chip will update readings.
                Unit: millisecond
                RW
-               Some devices have a variable update rate. This attribute
-               can be used to change the update rate to the desired
-               frequency.
+               Some devices have a variable update rate or interval.
+               This attribute can be used to change it to the desired value.
 
 
 ************
index 27a52b35d55bf1b6009551c97f400da5c14f59bc..3d8a97747f7731c801ca7d3a1483858feeb76b6c 100644 (file)
@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed.
 section titled <section title> from <filename>.
 Spaces are allowed in <section title>; do not quote the <section title>.
 
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This makes sense to use when you use !F/!P only and want to verify
+that all documentation is included.
+
 Tim.
 */ <twaugh@redhat.com>
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
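
The atomic_dec_and_mutex_lock() entry added above decrements an atomic count and, only when it reaches zero, returns with the mutex held. A hedged sketch of the usual "drop the last reference under a lock" pattern; struct foo and its fields are assumptions:

/* Drop a reference; if it was the last one, free the object while
 * holding the lock that protects lookups.  Illustration only. */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct foo {
	atomic_t refcount;
	/* ... payload ... */
};

static DEFINE_MUTEX(foo_lock);	/* protects whatever can still find the object */

static void foo_put(struct foo *f)
{
	/* Returns nonzero with foo_lock held only when the count hit zero. */
	if (atomic_dec_and_mutex_lock(&f->refcount, &foo_lock)) {
		kfree(f);
		mutex_unlock(&foo_lock);
	}
}
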
index 9363e056188ac87282c2477e25df2eebe6e837fa..8ed17587a74bdc006b2d2922b5709f0e16ec08ad 100644 (file)
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
 current limit is controllable).
 
 (C) 2008  Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
 
 
 Nomenclature
index ce46fa1e643e876344071d480e1e6d6d54b89856..37c6aad5e590ac1375944b8edf0656aa7b106607 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop       Basic Laptop config (default)
+  hp-laptop    HP laptops, e.g. G60
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644 (file)
index 0000000..e4498a2
--- /dev/null
@@ -0,0 +1,380 @@
+
+Concurrency Managed Workqueue (cmwq)
+
+September, 2010                Tejun Heo <tj@kernel.org>
+                       Florian Mickler <florian@mickler.org>
+
+CONTENTS
+
+1. Introduction
+2. Why cmwq?
+3. The Design
+4. Application Programming Interface (API)
+5. Example Execution Scenarios
+6. Guidelines
+
+
+1. Introduction
+
+There are many cases where an asynchronous process execution context
+is needed and the workqueue (wq) API is the most commonly used
+mechanism for such cases.
+
+When such an asynchronous execution context is needed, a work item
+describing which function to execute is put on a queue.  An
+independent thread serves as the asynchronous execution context.  The
+queue is called workqueue and the thread is called worker.
+
+While there are work items on the workqueue the worker executes the
+functions associated with the work items one after the other.  When
+there is no work item left on the workqueue the worker becomes idle.
+When a new work item gets queued, the worker begins executing again.
+
+
+2. Why cmwq?
+
+In the original wq implementation, a multi threaded (MT) wq had one
+worker thread per CPU and a single threaded (ST) wq had one worker
+thread system-wide.  A single MT wq needed to keep around the same
+number of workers as the number of CPUs.  The kernel grew a lot of MT
+wq users over the years and with the number of CPU cores continuously
+rising, some systems saturated the default 32k PID space just booting
+up.
+
+Although MT wq wasted a lot of resource, the level of concurrency
+provided was unsatisfactory.  The limitation was common to both ST and
+MT wq albeit less severe on MT.  Each wq maintained its own separate
+worker pool.  A MT wq could provide only one execution context per CPU
+while a ST wq one for the whole system.  Work items had to compete for
+those very limited execution contexts leading to various problems
+including proneness to deadlocks around the single execution context.
+
+The tension between the provided level of concurrency and resource
+usage also forced its users to make unnecessary tradeoffs like libata
+choosing to use ST wq for polling PIOs and accepting an unnecessary
+limitation that no two polling PIOs can progress at the same time.  As
+MT wq don't provide much better concurrency, users which require
+higher level of concurrency, like async or fscache, had to implement
+their own thread pool.
+
+Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
+focus on the following goals.
+
+* Maintain compatibility with the original workqueue API.
+
+* Use per-CPU unified worker pools shared by all wq to provide
+  flexible level of concurrency on demand without wasting a lot of
+  resource.
+
+* Automatically regulate worker pool and level of concurrency so that
+  the API users don't need to worry about such details.
+
+
+3. The Design
+
+In order to ease the asynchronous execution of functions a new
+abstraction, the work item, is introduced.
+
+A work item is a simple struct that holds a pointer to the function
+that is to be executed asynchronously.  Whenever a driver or subsystem
+wants a function to be executed asynchronously it has to set up a work
+item pointing to that function and queue that work item on a
+workqueue.
+
+Special purpose threads, called worker threads, execute the functions
+off of the queue, one after the other.  If no work is queued, the
+worker threads become idle.  These worker threads are managed in so
+called thread-pools.
+
+The cmwq design differentiates between the user-facing workqueues that
+subsystems and drivers queue work items on and the backend mechanism
+which manages thread-pool and processes the queued work items.
+
+The backend is called gcwq.  There is one gcwq for each possible CPU
+and one gcwq to serve work items queued on unbound workqueues.
+
+Subsystems and drivers can create and queue work items through special
+workqueue API functions as they see fit. They can influence some
+aspects of the way the work items are executed by setting flags on the
+workqueue they are putting the work item on. These flags include
+things like CPU locality, reentrancy, concurrency limits and more. To
+get a detailed overview refer to the API description of
+alloc_workqueue() below.
+
+When a work item is queued to a workqueue, the target gcwq is
+determined according to the queue parameters and workqueue attributes
+and appended on the shared worklist of the gcwq.  For example, unless
+specifically overridden, a work item of a bound workqueue will be
+queued on the worklist of exactly that gcwq that is associated to the
+CPU the issuer is running on.
+
+For any worker pool implementation, managing the concurrency level
+(how many execution contexts are active) is an important issue.  cmwq
+tries to keep the concurrency at a minimal but sufficient level.
+Minimal to save resources and sufficient in that the system is used at
+its full capacity.
+
+Each gcwq bound to an actual CPU implements concurrency management by
+hooking into the scheduler.  The gcwq is notified whenever an active
+worker wakes up or sleeps and keeps track of the number of the
+currently runnable workers.  Generally, work items are not expected to
+hog a CPU and consume many cycles.  That means maintaining just enough
+concurrency to prevent work processing from stalling should be
+optimal.  As long as there are one or more runnable workers on the
+CPU, the gcwq doesn't start execution of a new work, but, when the
+last running worker goes to sleep, it immediately schedules a new
+worker so that the CPU doesn't sit idle while there are pending work
+items.  This allows using a minimal number of workers without losing
+execution bandwidth.
+
+Keeping idle workers around doesn't cost anything other than the memory space
+for kthreads, so cmwq holds onto idle ones for a while before killing
+them.
+
+For an unbound wq, the above concurrency management doesn't apply and
+the gcwq for the pseudo unbound CPU tries to start executing all work
+items as soon as possible.  The responsibility of regulating
+concurrency level is on the users.  There is also a flag to mark a
+bound wq to ignore the concurrency management.  Please refer to the
+API section for details.
+
+The forward progress guarantee relies on the fact that workers can be created when
+more execution contexts are necessary, which in turn is guaranteed
+through the use of rescue workers.  All work items which might be used
+on code paths that handle memory reclaim are required to be queued on
+wq's that have a rescue-worker reserved for execution under memory
+pressure.  Else it is possible that the thread-pool deadlocks waiting
+for execution contexts to free up.
+
+
+4. Application Programming Interface (API)
+
+alloc_workqueue() allocates a wq.  The original create_*workqueue()
+functions are deprecated and scheduled for removal.  alloc_workqueue()
+takes three arguments - @name, @flags and @max_active.  @name is the
+name of the wq and also used as the name of the rescuer thread if
+there is one.
+
+A wq no longer manages execution resources but serves as a domain for
+forward progress guarantee, flush and work item attributes.  @flags
+and @max_active control how work items are assigned execution
+resources, scheduled and executed.
+
+@flags:
+
+  WQ_NON_REENTRANT
+
+       By default, a wq guarantees non-reentrance only on the same
+       CPU.  A work item may not be executed concurrently on the same
+       CPU by multiple workers but is allowed to be executed
+       concurrently on multiple CPUs.  This flag makes sure
+       non-reentrance is enforced across all CPUs.  Work items queued
+       to a non-reentrant wq are guaranteed to be executed by at most
+       one worker system-wide at any given time.
+
+  WQ_UNBOUND
+
+       Work items queued to an unbound wq are served by a special
+       gcwq which hosts workers which are not bound to any specific
+       CPU.  This makes the wq behave as a simple execution context
+       provider without concurrency management.  The unbound gcwq
+       tries to start execution of work items as soon as possible.
+       Unbound wq sacrifices locality but is useful for the following
+       cases.
+
+       * Wide fluctuation in the concurrency level requirement is
+         expected and using bound wq may end up creating large number
+         of mostly unused workers across different CPUs as the issuer
+         hops through different CPUs.
+
+       * Long running CPU intensive workloads which can be better
+         managed by the system scheduler.
+
+  WQ_FREEZEABLE
+
+       A freezeable wq participates in the freeze phase of the system
+       suspend operations.  Work items on the wq are drained and no
+       new work item starts execution until thawed.
+
+  WQ_RESCUER
+
+       All wq which might be used in the memory reclaim paths _MUST_
+       have this flag set.  This reserves one worker exclusively for
+       the execution of this wq under memory pressure.
+
+  WQ_HIGHPRI
+
+       Work items of a highpri wq are queued at the head of the
+       worklist of the target gcwq and start execution regardless of
+       the current concurrency level.  In other words, highpri work
+       items will always start execution as soon as execution
+       resource is available.
+
+       Ordering among highpri work items is preserved - a highpri
+       work item queued after another highpri work item will start
+       execution after the earlier highpri work item starts.
+
+       Although highpri work items are not held back by other
+       runnable work items, they still contribute to the concurrency
+       level.  Highpri work items in runnable state will prevent
+       non-highpri work items from starting execution.
+
+       This flag is meaningless for unbound wq.
+
+  WQ_CPU_INTENSIVE
+
+       Work items of a CPU intensive wq do not contribute to the
+       concurrency level.  In other words, runnable CPU intensive
+       work items will not prevent other work items from starting
+       execution.  This is useful for bound work items which are
+       expected to hog CPU cycles so that their execution is
+       regulated by the system scheduler.
+
+       Although CPU intensive work items don't contribute to the
+       concurrency level, start of their executions is still
+       regulated by the concurrency management and runnable
+       non-CPU-intensive work items can delay execution of CPU
+       intensive work items.
+
+       This flag is meaningless for unbound wq.
+
+  WQ_HIGHPRI | WQ_CPU_INTENSIVE
+
+       This combination makes the wq avoid interaction with
+       concurrency management completely and behave as a simple
+       per-CPU execution context provider.  Work items queued on a
+       highpri CPU-intensive wq start execution as soon as resources
+       are available and don't affect execution of other work items.
+
+@max_active:
+
+@max_active determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq.  For example,
+with @max_active of 16, at most 16 work items of the wq can be
+executing at the same time per CPU.
+
+Currently, for a bound wq, the maximum limit for @max_active is 512
+and the default value used when 0 is specified is 256.  For an unbound
+wq, the limit is the higher of 512 and 4 * num_possible_cpus().  These
+values are chosen sufficiently high such that they are not the
+limiting factor while providing protection in runaway cases.
+
+The number of active work items of a wq is usually regulated by the
+users of the wq, more specifically, by how many work items the users
+may queue at the same time.  Unless there is a specific need for
+throttling the number of active work items, specifying '0' is
+recommended.
+
+Some users depend on the strict execution ordering of ST wq.  The
+combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
+behavior.  Work items on such wq are always queued to the unbound gcwq
+and only one work item can be active at any given time thus achieving
+the same ordering property as ST wq.
+
+
+5. Example Execution Scenarios
+
+The following example execution scenarios try to illustrate how cmwq
+behave under different configurations.
+
+ Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
+ w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
+ again before finishing.  w1 and w2 burn CPU for 5ms then sleep for
+ 10ms.
+
+Ignoring all other tasks, works and processing overhead, and assuming
+simple FIFO scheduling, the following is one highly simplified version
+of possible sequences of events with the original wq.
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 starts and burns CPU
+ 25            w1 sleeps
+ 35            w1 wakes up and finishes
+ 35            w2 starts and burns CPU
+ 40            w2 sleeps
+ 50            w2 wakes up and finishes
+
+And with cmwq with @max_active >= 3,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 starts and burns CPU
+ 10            w1 sleeps
+ 10            w2 starts and burns CPU
+ 15            w2 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 25            w2 wakes up and finishes
+
+If @max_active == 2,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 starts and burns CPU
+ 10            w1 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 20            w2 starts and burns CPU
+ 25            w2 sleeps
+ 35            w2 wakes up and finishes
+
+Now, let's assume w1 and w2 are queued to a different wq q1 which has
+WQ_HIGHPRI set,
+
+ TIME IN MSECS EVENT
+ 0             w1 and w2 start and burn CPU
+ 5             w1 sleeps
+ 10            w2 sleeps
+ 10            w0 starts and burns CPU
+ 15            w0 sleeps
+ 15            w1 wakes up and finishes
+ 20            w2 wakes up and finishes
+ 25            w0 wakes up and burns CPU
+ 30            w0 finishes
+
+If q1 has WQ_CPU_INTENSIVE set,
+
+ TIME IN MSECS EVENT
+ 0             w0 starts and burns CPU
+ 5             w0 sleeps
+ 5             w1 and w2 start and burn CPU
+ 10            w1 sleeps
+ 15            w2 sleeps
+ 15            w0 wakes up and burns CPU
+ 20            w0 finishes
+ 20            w1 wakes up and finishes
+ 25            w2 wakes up and finishes
+
+
+6. Guidelines
+
+* Do not forget to use WQ_RESCUER if a wq may process work items which
+  are used during memory reclaim.  Each wq with WQ_RESCUER set has one
+  rescuer thread reserved for it.  If there is dependency among
+  multiple work items used during memory reclaim, they should be
+  queued to separate wq each with WQ_RESCUER.
+
+* Unless strict ordering is required, there is no need to use ST wq.
+
+* Unless there is a specific need, using 0 for @max_active is
+  recommended.  In most use cases, concurrency level usually stays
+  well under the default limit.
+
+* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
+  flush and work item attributes.  Work items which are not involved
+  in memory reclaim and don't need to be flushed as a part of a group
+  of work items, and don't require any special attribute, can use one
+  of the system wq.  There is no difference in execution
+  characteristics between using a dedicated wq and a system wq.
+
+* Unless work items are expected to consume a huge amount of CPU
+  cycles, using a bound wq is usually beneficial due to the increased
+  level of locality in wq operations and work item execution.
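
As a compact illustration of the alloc_workqueue() interface documented above, a hedged driver-side sketch; the workqueue name, the WQ_RESCUER choice and the work function body are assumptions rather than part of the patch:

/* Allocate a wq, queue one work item, then tear everything down. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* Runs in process context on one of the shared gcwq workers. */
	pr_info("example work item executed\n");
}

static int example_setup(void)
{
	/* @max_active = 0 picks the default limit, as recommended above. */
	example_wq = alloc_workqueue("example_wq", WQ_RESCUER, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_teardown(void)
{
	flush_workqueue(example_wq);	/* wait for pending/running items */
	destroy_workqueue(example_wq);
}
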
index 20a03b93bda5aa1e494956a87eedcee1f4de02f9..ae95fd4efbd4701c4e1b49ef4bd173fdb3470b3f 100644 (file)
@@ -962,6 +962,13 @@ W: http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c6410/
 
+ARM/S5P ARM ARCHITECTURES
+M:     Kukjin Kim <kgene.kim@samsung.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-s5p*/
+
 ARM/SHMOBILE ARM ARCHITECTURE
 M:     Paul Mundt <lethal@linux-sh.org>
 M:     Magnus Damm <magnus.damm@gmail.com>
@@ -1220,7 +1227,7 @@ F:        drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
@@ -1228,7 +1235,7 @@ S:        Supported
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
 S:     Supported
 F:     arch/avr32/mach-at32ap/
 
@@ -2199,6 +2206,12 @@ W:       http://acpi4asus.sf.net
 S:     Maintained
 F:     drivers/platform/x86/eeepc-laptop.c
 
+EFIFB FRAMEBUFFER DRIVER
+L:     linux-fbdev@vger.kernel.org
+M:     Peter Jones <pjones@redhat.com>
+S:     Maintained
+F:     drivers/video/efifb.c
+
 EFS FILESYSTEM
 W:     http://aeschi.ch.eu.org/efs/
 S:     Orphan
@@ -2657,9 +2670,14 @@ S:       Maintained
 F:     drivers/media/video/gspca/
 
 HARDWARE MONITORING
+M:     Jean Delvare <khali@linux-fr.org>
+M:     Guenter Roeck <guenter.roeck@ericsson.com>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
-S:     Orphan
+T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+S:     Maintained
 F:     Documentation/hwmon/
 F:     drivers/hwmon/
 F:     include/linux/hwmon*.h
@@ -2797,11 +2815,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3426,7 +3439,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -3787,9 +3800,8 @@ W:        http://www.syskonnect.com
 S:     Supported
 
 MATROX FRAMEBUFFER DRIVER
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
 L:     linux-fbdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
@@ -3913,10 +3925,8 @@ F:       Documentation/serial/moxa-smartio
 F:     drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:     Lennart Poettering <mzxreary@0pointer.de>
+M:     Lee, Chun-Yi <jlee@novell.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     https://tango.0pointer.de/mailman/listinfo/s270-linux
-W:     http://0pointer.de/lennart/tchibo.html
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
 
@@ -3933,8 +3943,10 @@ S:       Supported
 F:     drivers/mfd/
 
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/
 F:     include/linux/mmc/
 
@@ -3956,7 +3968,7 @@ F:        drivers/char/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:     Felipe Balbi <felipe.balbi@nokia.com>
+M:     Felipe Balbi <balbi@ti.com>
 L:     linux-usb@vger.kernel.org
 T:     git git://gitorious.org/usb/usb.git
 S:     Maintained
@@ -3976,8 +3988,8 @@ S:        Maintained
 F:     drivers/net/natsemi.c
 
 NCP FILESYSTEM
-M:     Petr Vandrovec <vandrove@vc.cvut.cz>
-S:     Maintained
+M:     Petr Vandrovec <petr@vandrovec.name>
+S:     Odd Fixes
 F:     fs/ncpfs/
 
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -4254,7 +4266,7 @@ S:        Maintained
 F:     drivers/char/hw_random/omap-rng.c
 
 OMAP USB SUPPORT
-M:     Felipe Balbi <felipe.balbi@nokia.com>
+M:     Felipe Balbi <balbi@ti.com>
 M:     David Brownell <dbrownell@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
@@ -4832,6 +4844,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4856,6 +4869,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4863,12 +4877,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -5105,8 +5117,10 @@ S:       Maintained
 F:     drivers/mmc/host/sdricoh_cs.c
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S:     Orphan
+M:     Chris Ball <cjb@laptop.org>
 L:     linux-mmc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S:     Maintained
 F:     drivers/mmc/host/sdhci.*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
index 4df9873f83b275a48037e86e058208fa9c6baffc..77b5c6ed0ce5bf64764b97d2a45bd2e7b9fd3b11 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
index 4877a8c8ee1697599289f35107824f95ba8daf84..fe48fc7a3ebaf85c9bf8c56391bf20d9c9b90bbf 100644 (file)
@@ -32,8 +32,9 @@ config HAVE_OPROFILE
 
 config KPROBES
        bool "Kprobes"
-       depends on KALLSYMS && MODULES
+       depends on MODULES
        depends on HAVE_KPROBES
+       select KALLSYMS
        help
          Kprobes allows you to trap at almost any kernel address and
          execute a callback function.  register_kprobe() establishes
@@ -45,7 +46,6 @@ config OPTPROBES
        def_bool y
        depends on KPROBES && HAVE_OPTPROBES
        depends on !PREEMPT
-       select KALLSYMS_ALL
 
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
        bool
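
The KPROBES help text above mentions register_kprobe(). A minimal hedged sketch of a probe module; probing "do_fork" and the handler body are assumptions, not something this patch adds:

/* Register a kprobe on an assumed symbol and log each hit. */
#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %s\n", p->symbol_name);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
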
index 01d71e1c8a9eb6df57fe8bc13e1f94f634a8eaf7..012f1243b1c1a0024af9cffe0afdfc204f4dcf11 100644 (file)
@@ -43,6 +43,8 @@ extern void smp_imb(void);
 /* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */
 
 #ifndef CONFIG_SMP
+#include <linux/sched.h>
+
 extern void __load_new_mm_context(struct mm_struct *);
 static inline void
 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
index 804e5311c84188fbc14a62b7afb97bc35a232003..058937bf5a77718983126f2ab3813a023cb8c1d0 100644 (file)
 #define __NR_pwritev                   491
 #define __NR_rt_tgsigqueueinfo         492
 #define __NR_perf_event_open           493
+#define __NR_fanotify_init             494
+#define __NR_fanotify_mark             495
+#define __NR_prlimit64                 496
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS                    494
+#define NR_SYSCALLS                    497
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 /* "Conditional" syscalls.  What we want is
 
index b45d913a51c368881b4311d1f1887566829b09a8..6d159cee5f2f43a48bc9aac8355069a3e588ef7f 100644 (file)
@@ -73,8 +73,6 @@
        ldq     $20, HAE_REG($19);      \
        stq     $21, HAE_CACHE($19);    \
        stq     $21, 0($20);            \
-       ldq     $0, 0($sp);             \
-       ldq     $1, 8($sp);             \
 99:;                                   \
        ldq     $19, 72($sp);           \
        ldq     $20, 80($sp);           \
@@ -316,19 +314,24 @@ ret_from_sys_call:
        cmovne  $26, 0, $19             /* $19 = 0 => non-restartable */
        ldq     $0, SP_OFF($sp)
        and     $0, 8, $0
-       beq     $0, restore_all
-ret_from_reschedule:
+       beq     $0, ret_to_kernel
+ret_to_user:
        /* Make sure need_resched and sigpending don't change between
                sampling and the rti.  */
        lda     $16, 7
        call_pal PAL_swpipl
        ldl     $5, TI_FLAGS($8)
        and     $5, _TIF_WORK_MASK, $2
-       bne     $5, work_pending
+       bne     $2, work_pending
 restore_all:
        RESTORE_ALL
        call_pal PAL_rti
 
+ret_to_kernel:
+       lda     $16, 7
+       call_pal PAL_swpipl
+       br restore_all
+
        .align 3
 $syscall_error:
        /*
@@ -363,7 +366,7 @@ $ret_success:
  *       $8: current.
  *      $19: The old syscall number, or zero if this is not a return
  *           from a syscall that errored and is possibly restartable.
- *      $20: Error indication.
+ *      $20: The old a3 value
  */
 
        .align  4
@@ -392,12 +395,18 @@ $work_resched:
 
 $work_notifysig:
        mov     $sp, $16
-       br      $1, do_switch_stack
+       bsr     $1, do_switch_stack
        mov     $sp, $17
        mov     $5, $18
+       mov     $19, $9         /* save old syscall number */
+       mov     $20, $10        /* save old a3 */
+       and     $5, _TIF_SIGPENDING, $2
+       cmovne  $2, 0, $9       /* we don't want double syscall restarts */
        jsr     $26, do_notify_resume
+       mov     $9, $19
+       mov     $10, $20
        bsr     $1, undo_switch_stack
-       br      restore_all
+       br      ret_to_user
 .end work_pending
 
 /*
@@ -430,6 +439,7 @@ strace:
        beq     $1, 1f
        ldq     $27, 0($2)
 1:     jsr     $26, ($27), sys_gettimeofday
+ret_from_straced:
        ldgp    $gp, 0($26)
 
        /* check return.. */
@@ -650,7 +660,7 @@ kernel_thread:
        /* We don't actually care for a3 success widgetry in the kernel.
           Not for positive errno values.  */
        stq     $0, 0($sp)              /* $0 */
-       br      restore_all
+       br      ret_to_kernel
 .end kernel_thread
 
 /*
@@ -757,11 +767,15 @@ sys_vfork:
        .ent    sys_sigreturn
 sys_sigreturn:
        .prologue 0
+       lda     $9, ret_from_straced
+       cmpult  $26, $9, $9
        mov     $sp, $17
        lda     $18, -SWITCH_STACK_SIZE($sp)
        lda     $sp, -SWITCH_STACK_SIZE($sp)
        jsr     $26, do_sigreturn
-       br      $1, undo_switch_stack
+       bne     $9, 1f
+       jsr     $26, syscall_trace
+1:     br      $1, undo_switch_stack
        br      ret_from_sys_call
 .end sys_sigreturn
 
@@ -770,46 +784,18 @@ sys_sigreturn:
        .ent    sys_rt_sigreturn
 sys_rt_sigreturn:
        .prologue 0
+       lda     $9, ret_from_straced
+       cmpult  $26, $9, $9
        mov     $sp, $17
        lda     $18, -SWITCH_STACK_SIZE($sp)
        lda     $sp, -SWITCH_STACK_SIZE($sp)
        jsr     $26, do_rt_sigreturn
-       br      $1, undo_switch_stack
+       bne     $9, 1f
+       jsr     $26, syscall_trace
+1:     br      $1, undo_switch_stack
        br      ret_from_sys_call
 .end sys_rt_sigreturn
 
-       .align  4
-       .globl  sys_sigsuspend
-       .ent    sys_sigsuspend
-sys_sigsuspend:
-       .prologue 0
-       mov     $sp, $17
-       br      $1, do_switch_stack
-       mov     $sp, $18
-       subq    $sp, 16, $sp
-       stq     $26, 0($sp)
-       jsr     $26, do_sigsuspend
-       ldq     $26, 0($sp)
-       lda     $sp, SWITCH_STACK_SIZE+16($sp)
-       ret
-.end sys_sigsuspend
-
-       .align  4
-       .globl  sys_rt_sigsuspend
-       .ent    sys_rt_sigsuspend
-sys_rt_sigsuspend:
-       .prologue 0
-       mov     $sp, $18
-       br      $1, do_switch_stack
-       mov     $sp, $19
-       subq    $sp, 16, $sp
-       stq     $26, 0($sp)
-       jsr     $26, do_rt_sigsuspend
-       ldq     $26, 0($sp)
-       lda     $sp, SWITCH_STACK_SIZE+16($sp)
-       ret
-.end sys_rt_sigsuspend
-
        .align  4
        .globl  sys_sethae
        .ent    sys_sethae
@@ -928,15 +914,6 @@ sys_execve:
        jmp     $31, do_sys_execve
 .end sys_execve
 
-       .align  4
-       .globl  osf_sigprocmask
-       .ent    osf_sigprocmask
-osf_sigprocmask:
-       .prologue 0
-       mov     $sp, $18
-       jmp     $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
        .align  4
        .globl  alpha_ni_syscall
        .ent    alpha_ni_syscall
index 8ca6345bf13167842d3c0fb60637e51e18e3db93..253cf1a87481e815ad9a724dde1fef51b5616d09 100644 (file)
@@ -90,11 +90,13 @@ static int
 ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, 
               u64 c_stat, u64 c_sts, int print)
 {
-       char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
-                              "MEMORY", "BCACHE", "DCACHE", 
-                              "BCACHE PROBE", "BCACHE PROBE" };
-       char *streamname[] = { "D", "I" };
-       char *bitsname[] = { "SINGLE", "DOUBLE" };
+       static const char * const sourcename[] = {
+               "UNKNOWN", "UNKNOWN", "UNKNOWN",
+               "MEMORY", "BCACHE", "DCACHE",
+               "BCACHE PROBE", "BCACHE PROBE"
+       };
+       static const char * const streamname[] = { "D", "I" };
+       static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
        int status = MCHK_DISPOSITION_REPORT;
        int source = -1, stream = -1, bits = -1;
 
index 5c905aaaeccd82861ea62d9186517475f0d7019c..648ae88aeb8ae0cce7dcedbe215bf56417e5eebc 100644 (file)
@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 static void
 marvel_print_pox_trans_sum(u64 trans_sum)
 {
-       char *pcix_cmd[] = { "Interrupt Acknowledge",
-                            "Special Cycle",
-                            "I/O Read",
-                            "I/O Write",
-                            "Reserved",
-                            "Reserved / Device ID Message",
-                            "Memory Read",
-                            "Memory Write",
-                            "Reserved / Alias to Memory Read Block",
-                            "Reserved / Alias to Memory Write Block",
-                            "Configuration Read",
-                            "Configuration Write",
-                            "Memory Read Multiple / Split Completion",
-                            "Dual Address Cycle",
-                            "Memory Read Line / Memory Read Block",
-                            "Memory Write and Invalidate / Memory Write Block"
+       static const char * const pcix_cmd[] = {
+               "Interrupt Acknowledge",
+               "Special Cycle",
+               "I/O Read",
+               "I/O Write",
+               "Reserved",
+               "Reserved / Device ID Message",
+               "Memory Read",
+               "Memory Write",
+               "Reserved / Alias to Memory Read Block",
+               "Reserved / Alias to Memory Write Block",
+               "Configuration Read",
+               "Configuration Write",
+               "Memory Read Multiple / Split Completion",
+               "Dual Address Cycle",
+               "Memory Read Line / Memory Read Block",
+               "Memory Write and Invalidate / Memory Write Block"
        };
 
 #define IO7__POX_TRANSUM__PCI_ADDR__S          (0)
index f7ed97ce0dfd72458dde026292b89702be518477..c3b3781a03de01045ebd270319173ae2b5d347ee 100644 (file)
@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
        int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-       char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
-       char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+       static const char * const serror_src[] = {
+               "GPCI", "APCI", "AGP HP", "AGP LP"
+       };
+       static const char * const serror_cmd[] = {
+               "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
+       };
 #endif /* CONFIG_VERBOSE_MCHECK */
 
 #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
        int status = MCHK_DISPOSITION_REPORT;
 
 #ifdef CONFIG_VERBOSE_MCHECK
-       char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
-                              "I/O Read",              "I/O Write",
-                              "Reserved",              "Reserved",
-                              "Memory Read",           "Memory Write",
-                              "Reserved",              "Reserved",
-                              "Configuration Read",    "Configuration Write",
-                              "Memory Read Multiple",  "Dual Address Cycle",
-                              "Memory Read Line","Memory Write and Invalidate"
+       static const char * const perror_cmd[] = {
+               "Interrupt Acknowledge", "Special Cycle",
+               "I/O Read",             "I/O Write",
+               "Reserved",             "Reserved",
+               "Memory Read",          "Memory Write",
+               "Reserved",             "Reserved",
+               "Configuration Read",   "Configuration Write",
+               "Memory Read Multiple", "Dual Address Cycle",
+               "Memory Read Line",     "Memory Write and Invalidate"
        };
 #endif /* CONFIG_VERBOSE_MCHECK */
 
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
        int cmd, len;
        unsigned long addr;
 
-       char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
-                                "Write (low-priority)",
-                                "Write (high-priority)",
-                                "Reserved",            "Reserved",
-                                "Flush",               "Fence"
+       static const char * const agperror_cmd[] = {
+               "Read (low-priority)",  "Read (high-priority)",
+               "Write (low-priority)", "Write (high-priority)",
+               "Reserved",             "Reserved",
+               "Flush",                "Fence"
        };
 #endif /* CONFIG_VERBOSE_MCHECK */
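
The same constification pattern recurs in the err_ev6, err_marvel and err_titan hunks above: marking the string tables static const moves them into .rodata, emitted once, instead of having the compiler rebuild an automatic array of pointers on every call. A standalone reduction of the pattern, with hypothetical names:

static const char * const example_names[] = {
        "MEMORY", "BCACHE", "DCACHE",
};

const char *example_lookup(unsigned int idx)
{
        if (idx < sizeof(example_names) / sizeof(example_names[0]))
                return example_names[idx];
        return "UNKNOWN";
}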
 
index 5d1e6d6ce6843b136fb7810e74c86df16d86daf4..547e8b84b2f794ab546aca3b1927c80bdc1104bf 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
 {
        struct mm_struct *mm;
 
-       lock_kernel();
        mm = current->mm;
        mm->end_code = bss_start + bss_len;
        mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
        printk("set_program_attributes(%lx %lx %lx %lx)\n",
                text_start, text_len, bss_start, bss_len);
 #endif
-       unlock_kernel();
        return 0;
 }
 
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
        long error;
        int __user *min_buf_size_ptr;
 
-       lock_kernel();
        switch (code) {
        case PL_SET:
                if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
                error = -EOPNOTSUPP;
                break;
        };
-       unlock_kernel();
        return error;
 }
 
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
 
 SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 {
-       char *sysinfo_table[] = {
+       const char *sysinfo_table[] = {
                utsname()->sysname,
                utsname()->nodename,
                utsname()->release,
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
                "dummy",        /* secure RPC domain */
        };
        unsigned long offset;
-       char *res;
+       const char *res;
        long len, err = -EINVAL;
 
        offset = command-1;
index 738fc824e2ea373f85b3ae04fb7eb1346ecacdac..b899e95f79fdb424ea9b07d31153edb0e824ac8e 100644 (file)
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
 {
        struct pci_dev *pdev = to_pci_dev(container_of(kobj,
                                                       struct device, kobj));
-       struct resource *res = (struct resource *)attr->private;
+       struct resource *res = attr->private;
        enum pci_mmap_state mmap_type;
        struct pci_bus_region bar;
        int i;
index 842dba308eab3065510857e50312496c674c1097..3ec35066f1dc51c09367c32bea106453a109953d 100644 (file)
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
        dest[27] = pt->r27;
        dest[28] = pt->r28;
        dest[29] = pt->gp;
-       dest[30] = rdusp();
+       dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
        dest[31] = pt->pc;
 
        /* Once upon a time this was the PS value.  Which is stupid
index 0932dbb1ef8eff444943645e15b0802b1d4cf5d5..6f7feb5db27193f33e24e962e4d1be257d8464ef 100644 (file)
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-               struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-       unsigned long oldmask = -EINVAL;
-
-       if ((unsigned long)how-1 <= 2) {
-               long sign = how-2;              /* -1 .. 1 */
-               unsigned long block, unblock;
-
-               newmask &= _BLOCKABLE;
-               spin_lock_irq(&current->sighand->siglock);
-               oldmask = current->blocked.sig[0];
-
-               unblock = oldmask & ~newmask;
-               block = oldmask | newmask;
-               if (!sign)
-                       block = unblock;
-               if (sign <= 0)
-                       newmask = block;
-               if (_NSIG_WORDS > 1 && sign > 0)
-                       sigemptyset(&current->blocked);
-               current->blocked.sig[0] = newmask;
-               recalc_sigpending();
-               spin_unlock_irq(&current->sighand->siglock);
-
-               regs->r0 = 0;           /* special no error return */
+       sigset_t oldmask;
+       sigset_t mask;
+       unsigned long res;
+
+       siginitset(&mask, newmask & _BLOCKABLE);
+       res = sigprocmask(how, &mask, &oldmask);
+       if (!res) {
+               force_successful_syscall_return();
+               res = oldmask.sig[0];
        }
-       return oldmask;
+       return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
                new_ka.ka_restorer = NULL;
        }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -144,8 +118,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
 {
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       /* Indicate EINTR on return from any possible signal handler,
-          which will not come back through here, but via sigreturn.  */
-       regs->r0 = EINTR;
-       regs->r19 = 1;
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
-                struct pt_regs *regs, struct switch_stack *sw)
-{
-       sigset_t set;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-       if (copy_from_user(&set, uset, sizeof(set)))
-               return -EFAULT;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       /* Indicate EINTR on return from any possible signal handler,
-          which will not come back through here, but via sigreturn.  */
-       regs->r0 = EINTR;
-       regs->r19 = 1;
-
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        unsigned long usp;
        long i, err = __get_user(regs->pc, &sc->sc_pc);
 
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        sw->r26 = (unsigned long) ret_from_sys_call;
 
        err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
                regs->pc -= 4;
                break;
        case ERESTART_RESTARTBLOCK:
-               current_thread_info()->restart_block.fn = do_no_restart_syscall;
                regs->r0 = EINTR;
                break;
        }
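
For reference, the C-library calling sequence that the rewritten osf_sigprocmask comment contrasts with looks like the fragment below: the previous mask comes back through a pointer rather than in v0. This is an illustrative userspace snippet, not part of the patch.

#include <signal.h>

/* Block SIGINT and hand the previous mask back to the caller. */
int block_sigint(sigset_t *saved)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        return sigprocmask(SIG_BLOCK, &set, saved);     /* 0 on success, -1/errno on error */
}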
index 4afc1a1e2e5a055ccda3d11a7d2f1b96c3670235..f0df3fbd84025eff9bf304fd179bed8785e865dc 100644 (file)
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
        srm_env_t       *entry;
        char            *page;
 
-       entry = (srm_env_t *)m->private;
+       entry = m->private;
        page = (char *)__get_free_page(GFP_USER);
        if (!page)
                return -ENOMEM;
index 09acb786e72b0d665d3aa9e38346148adbdb08c5..a6a1de9db16fd6e57d96c4e0b1f76db70f27147f 100644 (file)
@@ -58,7 +58,7 @@ sys_call_table:
        .quad sys_open                          /* 45 */
        .quad alpha_ni_syscall
        .quad sys_getxgid
-       .quad osf_sigprocmask
+       .quad sys_osf_sigprocmask
        .quad alpha_ni_syscall
        .quad alpha_ni_syscall                  /* 50 */
        .quad sys_acct
@@ -512,6 +512,9 @@ sys_call_table:
        .quad sys_pwritev
        .quad sys_rt_tgsigqueueinfo
        .quad sys_perf_event_open
+       .quad sys_fanotify_init
+       .quad sys_fanotify_mark                         /* 495 */
+       .quad sys_prlimit64
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
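
The three table entries just added pair with the __NR_fanotify_init/__NR_fanotify_mark/__NR_prlimit64 definitions in the alpha unistd.h hunk earlier. A hypothetical userspace check of the new prlimit64 slot, using the number from that hunk (the local struct mirrors the kernel's two-field struct rlimit64):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <unistd.h>

struct krlimit64 {
        unsigned long long rlim_cur;
        unsigned long long rlim_max;
};

int main(void)
{
        struct krlimit64 r;

        /* 496 == __NR_prlimit64 on alpha after this patch; pid 0 means "self". */
        if (syscall(496, 0, RLIMIT_NOFILE, NULL, &r) == 0)
                printf("nofile: soft %llu, hard %llu\n", r.rlim_cur, r.rlim_max);
        return 0;
}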
index eacceb26d9c8aa8bd250b409e821c277a9b51c73..396af1799ea44e3a80bd758226f31f0c3bced799 100644 (file)
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
        write_sequnlock(&xtime_lock);
 
-#ifndef CONFIG_SMP
-       while (nticks--)
-               update_process_times(user_mode(get_irq_regs()));
-#endif
-
        if (test_perf_event_pending()) {
                clear_perf_event_pending();
                perf_event_do_pending();
        }
 
+#ifndef CONFIG_SMP
+       while (nticks--)
+               update_process_times(user_mode(get_irq_regs()));
+#endif
+
        return IRQ_HANDLED;
 }
 
index b14f015008ada5e90d580b0589d4ff5144a1602e..0414e021a91c3ba8756674dea873f0dabd6c0abd 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/sched.h>
 #include <linux/tty.h>
 #include <linux/delay.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
                return;
        }
 
-       lock_kernel();
        printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
                pc, va, opcode, reg);
        do_exit(SIGSEGV);
@@ -646,7 +644,6 @@ got_exception:
         * Yikes!  No one to forward the exception to.
         * Since the registers are in a weird format, dump them ourselves.
         */
-       lock_kernel();
 
        printk("%s(%d): unhandled unaligned exception\n",
               current->comm, task_pid_nr(current));
index 16bc8eb4901c9335b32a774082e9bc1cc77bb8b9..88c97bc7a6f5b7a751b0b628e75a17f4604f82f3 100644 (file)
@@ -271,7 +271,6 @@ config ARCH_AT91
        bool "Atmel AT91"
        select ARCH_REQUIRE_GPIOLIB
        select HAVE_CLK
-       select ARCH_USES_GETTIMEOFFSET
        help
          This enables support for systems based on the Atmel AT91RM9200,
          AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
          ACTLR register. Note that setting specific bits in the ACTLR register
          may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+       bool "ARM errata: DMB operation may be faulty"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742230 Cortex-A9
+         (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+         between two write operations may not ensure the correct visibility
+         ordering of the two writes. This workaround sets a specific bit in
+         the diagnostic register of the Cortex-A9 which causes the DMB
+         instruction to behave as a DSB, ensuring the correct behaviour of
+         the two writes.
+
+config ARM_ERRATA_742231
+       bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 742231 Cortex-A9
+         (r2p0..r2p2) erratum. Under certain conditions, specific to the
+         Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+         accessing some data located in the same cache line, may get corrupted
+         data due to bad handling of the address hazard when the line gets
+         replaced from one of the CPUs at the same time as another CPU is
+         accessing it. This workaround sets specific bits in the diagnostic
+         register of the Cortex-A9 which reduces the linefill issuing
+         capabilities of the processor.
+
 config PL310_ERRATA_588369
        bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
        depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1576,97 +1601,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_TEGRA ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377 ||\
-               ARCH_S5PV310
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
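
The ZRELADDR choice deleted above moves into arch/arm/boot/Makefile, and for CONFIG_AUTO_ZRELADDR the decompressor (further down) derives the address itself: round the current PC down to a 128 MB boundary and add TEXT_OFFSET (0x8000 on most platforms). A small sketch of that arithmetic, using the Versatile Express value from the removed table as the example:

#include <stdint.h>
#include <stdio.h>

/* AUTO_ZRELADDR rule: mask PC to a 128 MB boundary, add TEXT_OFFSET. */
static uint32_t auto_zreladdr(uint32_t pc, uint32_t text_offset)
{
        return (pc & 0xf8000000u) + text_offset;
}

int main(void)
{
        /* Running anywhere in Versatile Express RAM at 0x60xxxxxx ... */
        printf("0x%08x\n", auto_zreladdr(0x60123456u, 0x8000u));        /* -> 0x60008000 */
        return 0;
}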
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..65a7c1c588a94ab4623be0ddfe02a691fb2954c6 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
@@ -112,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
        $(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
        @sed "$(SEDFLAGS)" < $< > $@
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..1bec96e851967101df7a796745b84d24bd320ab8 100644 (file)
@@ -263,6 +263,22 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= PHYS_OFFSET + SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
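
With a real dma_set_coherent_mask() now exported for this bridge (and for IXP4xx below), a PCI device behind the IT8152 negotiates its coherent mask the usual way. A hedged sketch of the caller side follows; the driver name and probe wiring are hypothetical, only dma_set_coherent_mask() and DMA_BIT_MASK() are the stock kernel API.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* The bridge code above accepts the mask only if its 64 MB window
         * is reachable; otherwise the driver must fall back or bail out. */
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;

        /* ... dma_alloc_coherent() allocations are now safe to attempt ... */
        return 0;
}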
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..b5799a3b7117d5480eec6456008f79e01d1064ae 100644 (file)
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
index ab68cf1ef80fe7ccad28bfdd5d5084bb6f1b61dc..e90b167ea8484002fffb0121e7a2d61726851fe6 100644 (file)
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index f05a35a59694dc7af54a76ffb899e02f8b849875..7885722bdf4eff7115137ac46444c8d176f05b57 100644 (file)
@@ -48,6 +48,8 @@ work_pending:
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
+       tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
+       movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again
 
@@ -418,11 +420,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
                add     r0, sp, #S_OFF
+               mov     why, #0         @ prevent syscall restart handling
                b       sys_sigreturn
 ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
                add     r0, sp, #S_OFF
+               mov     why, #0         @ prevent syscall restart handling
                b       sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
index 417c392ddf1cb55066fa5f99e83e77514bd89901..ecbb0288e5dd95c80b420635dffee6ef600f86a8 100644 (file)
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
index 753c0d31a3d3f0b407cfe20b68f5ba66189cd1a7..c67b47f1c0fd805751cf226a65315337a2507942 100644 (file)
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
        .pmc_mask       = 1 << AT91SAM9G45_ID_SSC1,
        .type           = CLK_TYPE_PERIPHERAL,
 };
-static struct clk tcb_clk = {
-       .name           = "tcb_clk",
+static struct clk tcb0_clk = {
+       .name           = "tcb0_clk",
        .pmc_mask       = 1 << AT91SAM9G45_ID_TCB,
        .type           = CLK_TYPE_PERIPHERAL,
 };
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
        .parent         = &uhphs_clk,
 };
 
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+       .name           = "tcb1_clk",
+       .pmc_mask       = 0,
+       .type           = CLK_TYPE_PERIPHERAL,
+       .parent         = &tcb0_clk,
+};
+
 static struct clk *periph_clocks[] __initdata = {
        &pioA_clk,
        &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
        &spi1_clk,
        &ssc0_clk,
        &ssc1_clk,
-       &tcb_clk,
+       &tcb0_clk,
        &pwm_clk,
        &tsc_clk,
        &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
        &mmc1_clk,
        // irq0
        &ohci_clk,
+       &tcb1_clk,
 };
 
 /*
index 809114d5a5a6690ec3c4f510bb6cb88fc7fede99..1276babf84d540ad253d06eb089f29d57beab1a7 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [2] = {
+       [1] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PA21,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
        .sda_is_open_drain      = 1,
        .scl_pin                = AT91_PIN_PB11,
        .scl_is_open_drain      = 1,
-       .udelay                 = 2,            /* ~100 kHz */
+       .udelay                 = 5,            /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
 static void __init at91_add_device_tc(void)
 {
        /* this chip has one clock and irq for all six TC channels */
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+       at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb0_device);
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+       at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb1_device);
 }
 #else
index c4c8865d52d7bfd84f5a22416ef7fe6bc1c472aa..65eb0943194f4b0ea4ea65613435ceef4d875c75 100644 (file)
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
                .start  = AT91_PIN_PC11,
                .end    = AT91_PIN_PC11,
                .flags  = IORESOURCE_IRQ
+                       | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
        }
 };
 
 static struct dm9000_plat_data dm9000_platdata = {
-       .flags          = DM9000_PLATF_16BITONLY,
+       .flags          = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
 };
 
 static struct platform_device dm9000_device = {
@@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
 };
 
 
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
-       .wire4          = 1,
-//     .det_pin        = ... not connected
-//     .wp_pin         = ... not connected
-//     .vcc_pin        = ... not connected
-};
-
-
 /*
  * NAND flash
  */
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
        at91_add_device_nand(&ek_nand_data);
 }
 
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
 
 /*
  * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
 #endif
 };
 
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .wire4          = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
 
 /*
  * LCD Controller
index 7f7da439341fabc4e85b6febeb8b1f3e2cd6f4ee..7525cee3983f7252fac0542be5955a490ca6869b 100644 (file)
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
 int __init clk_register(struct clk *clk)
 {
        if (clk_is_peripheral(clk)) {
-               clk->parent = &mck;
+               if (!clk->parent)
+                       clk->parent = &mck;
                clk->mode = pmc_periph_mode;
                list_add_tail(&clk->node, &clocks);
        }
index 3d996b659ff41e43d794d93c167b409afb90c355..9be261beae7ddb2a72e2e9a76d9dbf2e2bf51316 100644 (file)
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 6b6f4c643709c7d14a4b2baf168464ad772be4ce..7781e35daec3d0a7cf9e956aceb7e717f48c7c44 100644 (file)
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 40fec315c99a192826d7530d43c6406726ecbcff..5e5b0a7831fbf2b7af156ba76bf4af0b6be26489 100644 (file)
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00008000),
                .length         = SZ_16K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index e4a3df1872aca89404899034ebbe920d453e7f66..26e8a9c7f50b4393f1e716575950fff967658b9a 100644 (file)
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
                .virtual        = SRAM_VIRT,
                .pfn            = __phys_to_pfn(0x00010000),
                .length         = SZ_32K,
-               /* MT_MEMORY_NONCACHED requires supersection alignment */
-               .type           = MT_DEVICE,
+               .type           = MT_MEMORY_NONCACHED,
        },
 };
 
index 3b3e4721ce2ea0fb9236803e566d4c67b0d5638e..eb4936ff90ad9c42b283fc6c354eae1f01c6c4c7 100644 (file)
@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT         0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-                                  DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)           (a)
+#define __io(a)        ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+                                                DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)   (a)
 
 #endif
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
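
Moving ep93xx_clock_init() from arch_initcall() to postcore_initcall() makes the clkdev table available one initcall level earlier. A sketch of a consumer that benefits; the consumer and the clock id are assumptions for illustration, not taken from this tree.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static int __init example_consumer_init(void)
{
        /* postcore_initcall() runs before arch_initcall(), so the lookup
         * table registered by ep93xx_clock_init() already exists here. */
        struct clk *clk = clk_get(NULL, "pwm_clk");     /* clock id assumed */

        if (IS_ERR(clk))
                return PTR_ERR(clk);
        return clk_enable(clk);
}
arch_initcall(example_consumer_init);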
index 61cd4d64b98596c7507dcaf67d069dbe22508e50..24498a932ba65b8054de5ba8229ef0e0c838dd71 100644 (file)
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
        return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1)
+               return 0;
+
+       return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 
index f91ca6d4fbe8a2820758d2252091a761caac377c..8138371c406e6584b8990803079c7a093cbe1964 100644 (file)
@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM                0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses()    1
 
 /* Register locations and bits */
index 93fc2ec95e7687b30b6144f2116adb0664266a4d..6e924b398919e822ca4f2bddd294358bf4008ede 100644 (file)
@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE    0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE    0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE     0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE         SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE     0xf2000000
index 55e7f00836b7cdba68a9662729119e977904a2dd..513ad3102d7c192d2fdc00229b6cdb527b002d87 100644 (file)
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 0 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
         * IORESOURCE_IO
         */
        pp->res[0].name = "PCIe 1 I/O Space";
-       pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+       pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
        pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
        pp->res[0].flags = IORESOURCE_IO;
 
index 4f5b0e0ce6cf8f87e0e843b82ce2f8b0b4cbc34c..1a8a25edb1b422ace6925e07171953390b51bc11 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
        cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-       cpu_reset(0);
+       if (cpu_is_pxa168())
+               cpu_reset(0xffff0000);
+       else
+               cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
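
As a worked example of the new get_rate_hsp() divider table: with the reference clock above 400 MHz and hsp_podf == 0, the IPU/HSP clock is fref >> 2. The 532 MHz MPLL figure below is an assumption about a typical i.MX35 setup, not something stated in the patch.

#include <stdio.h>

int main(void)
{
        unsigned long fref = 532000000UL;       /* assumed MPLL rate */
        unsigned long hsp_podf = 0;             /* divider field from CCM_PDR0 */
        unsigned long rate = 0;

        if (fref > 400UL * 1000 * 1000) {
                if (hsp_podf == 0)
                        rate = fref >> 2;       /* 133 MHz */
                else if (hsp_podf == 1)
                        rate = fref >> 3;
                else if (hsp_podf == 2)
                        rate = fref / 3;
        }
        printf("HSP/IPU clock: %lu Hz\n", rate);
        return 0;
}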
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 9770a6a973be561fdfb67cfdda1f8cdf36381e8e..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..58093d9e07be44ea482df319c88acb78c0db3975 100644 (file)
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        freqs.cpu = policy->cpu;
 
        if (freq_debug)
-               pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-                        "(SDRAM %d Mhz)\n",
+               pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
                         freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
                         (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
@@ -398,7 +397,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7f64d24cd5648df0790c6ec570322195d92359e5..814f1458a06a52ec5af6b37643ca4b6db602dd55 100644 (file)
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id <= 0x2;                             \
         })
+#else
+#define __cpu_is_pxa2xx(id)    (0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 13 & 0x7;    \
                _id == 0x3;                             \
         })
+#else
+#define __cpu_is_pxa3xx(id)    (0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)                            \
        ({                                              \
                unsigned int _id = (id) >> 4 & 0xfff;   \
                _id == 0x683 || _id == 0x693;           \
         })
+#else
+#define __cpu_is_pxa93x(id)    (0)
+#endif
 
 #define cpu_is_pxa2xx()                                        \
        ({                                              \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO         0
 #define PCIBIOS_MIN_MEM                0
 #define pcibios_assign_all_busses()    1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
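
The point of the new CONFIG guards is that on a kernel built for only one PXA family the other __cpu_is_*() tests collapse to the constant 0, so the guarded code is dropped at compile time. A kernel-side fragment illustrating that; the init function and the message are hypothetical, cpu_is_pxa3xx() is the macro defined in this header.

#include <linux/init.h>
#include <linux/kernel.h>
#include <mach/hardware.h>

static int __init example_init(void)
{
        /* On a PXA2xx-only build __cpu_is_pxa3xx(id) is now literally (0),
         * so this whole branch is eliminated by the compiler. */
        if (cpu_is_pxa3xx())
                pr_info("applying PXA3xx-only setup\n");

        return 0;
}
arch_initcall(example_init);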
index 262691fb97d86c2ce36de96d7e180a35b4906298..fdca3be47d9bb1fdaa70d8b7b9c112c34b32c495 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 77ad6d34ab5bc9cd0eb5ce2917a461ce8904c55c..405b92a29793d80fa771546418d78b4bec7094cd 100644 (file)
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
        },
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+       .use_pio        = 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
        i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-       pxa27x_set_i2c_power_info(NULL);
+       pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif
index c9b747cedea8fb48a4a9615d52394fa5d2e77fcc..37d6173bbb660868a3cb696cedcaab97b9eee66d 100644 (file)
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
        .ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
+       .gpio_power             = -1,
        .gpio_card_detect       = GPIO53_VPAC270_SD_DETECT_N,
        .gpio_card_ro           = GPIO52_VPAC270_SD_READONLY,
        .detect_delay_ms        = 200,
index a492b982aa062705a0c24d03dd696b020719b262..405e621289172512bbf01527d9061717a95d55d6 100644 (file)
 #include <mach/map.h>
 #include <mach/gpio-bank-c.h>
 #include <mach/spi-clocks.h>
+#include <mach/irqs.h>
 
 #include <plat/s3c64xx-spi.h>
 #include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
+#include <plat/devs.h>
 
 static char *spi_src_clks[] = {
        [S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
index 5c07d013b23da47dc387a436a2cedbeea2c826b7..e130379ba0e8df2752dfd5e4cae25e25b5dc6630 100644 (file)
 #include <plat/devs.h>
 #include <plat/regs-serial.h>
 
-#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
-#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
-#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
 static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
        [0] = {
-               .hwport      = 0,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 0,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [1] = {
-               .hwport      = 1,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 1,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [2] = {
-               .hwport      = 2,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 2,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
        [3] = {
-               .hwport      = 3,
-               .flags       = 0,
-               .ucon        = UCON,
-               .ulcon       = ULCON,
-               .ufcon       = UFCON,
+               .hwport = 3,
+               .flags  = 0,
+               .ucon   = UCON,
+               .ulcon  = ULCON,
+               .ufcon  = UFCON,
        },
 };
 
 /* DM9000AEP 10/100 ethernet controller */
 
 static struct resource real6410_dm9k_resource[] = {
-        [0] = {
-                .start = S3C64XX_PA_XM0CSN1,
-                .end   = S3C64XX_PA_XM0CSN1 + 1,
-                .flags = IORESOURCE_MEM
-        },
-        [1] = {
-                .start = S3C64XX_PA_XM0CSN1 + 4,
-                .end   = S3C64XX_PA_XM0CSN1 + 5,
-                .flags = IORESOURCE_MEM
-        },
-        [2] = {
-                .start = S3C_EINT(7),
-                .end   = S3C_EINT(7),
-                .flags = IORESOURCE_IRQ,
-        }
+       [0] = {
+               .start  = S3C64XX_PA_XM0CSN1,
+               .end    = S3C64XX_PA_XM0CSN1 + 1,
+               .flags  = IORESOURCE_MEM
+       },
+       [1] = {
+               .start  = S3C64XX_PA_XM0CSN1 + 4,
+               .end    = S3C64XX_PA_XM0CSN1 + 5,
+               .flags  = IORESOURCE_MEM
+       },
+       [2] = {
+               .start  = S3C_EINT(7),
+               .end    = S3C_EINT(7),
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+       }
 };
 
 static struct dm9000_plat_data real6410_dm9k_pdata = {
-        .flags          = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+       .flags          = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
 };
 
 static struct platform_device real6410_device_eth = {
-        .name           = "dm9000",
-        .id             = -1,
-        .num_resources  = ARRAY_SIZE(real6410_dm9k_resource),
-        .resource       = real6410_dm9k_resource,
-        .dev            = {
-                .platform_data  = &real6410_dm9k_pdata,
-        },
+       .name           = "dm9000",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(real6410_dm9k_resource),
+       .resource       = real6410_dm9k_resource,
+       .dev            = {
+               .platform_data  = &real6410_dm9k_pdata,
+       },
 };
 
 static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
        /* set timing for nCS1 suitable for ethernet chip */
 
        __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
-                       (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
-                       (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
-                       (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
-                       (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
-                       (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
-                       (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+               (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+               (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+               (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+               (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+               (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+               (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
 
        platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
 }
index af91fefef2c6c77899c4f96ffc9bed03cacee633..cfecd70657cb62e51d01ad820ce2f7856957f9fc 100644 (file)
@@ -280,6 +280,24 @@ static struct clk init_clocks_disable[] = {
                .parent         = &clk_hclk_dsys.clk,
                .enable         = s5pv210_clk_ip0_ctrl,
                .ctrlbit        = (1<<29),
+       }, {
+               .name           = "fimc",
+               .id             = 0,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 24),
+       }, {
+               .name           = "fimc",
+               .id             = 1,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 25),
+       }, {
+               .name           = "fimc",
+               .id             = 2,
+               .parent         = &clk_hclk_dsys.clk,
+               .enable         = s5pv210_clk_ip0_ctrl,
+               .ctrlbit        = (1 << 26),
        }, {
                .name           = "otg",
                .id             = -1,
@@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = {
                .id             = 1,
                .parent         = &clk_pclk_psys.clk,
                .enable         = s5pv210_clk_ip3_ctrl,
-               .ctrlbit        = (1<<8),
+               .ctrlbit        = (1 << 10),
        }, {
                .name           = "i2c",
                .id             = 2,
index b9f4d677cf5541460747a5086fb25f5fbcea744e..77f456c91ad36ba97001630613f9c75d1d335819 100644 (file)
@@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
        {
                .virtual        = (unsigned long)S5P_VA_SYSTIMER,
                .pfn            = __phys_to_pfn(S5PV210_PA_SYSTIMER),
-               .length         = SZ_1M,
+               .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)VA_VIC2,
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
index 7b1fc984abb64c85dd3124eba6b6b108000835b9..d5a71abcbaeaf5a8da0255e50e89f5982fd447d4 100644 (file)
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {
index 577df6cccb0891503bf0188f3c0f33103c159000..efb127022d42facb807b4cb54220d3299fe24e04 100644 (file)
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
        int i;
 
 #ifdef CONFIG_CACHE_L2X0
-       l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+       void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+       /* set RAM latencies to 1 cycle for this core tile. */
+       writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+       writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+       l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index d073b64ae87ec4f6652c67959244292dbb3e69ad..724ba3bce72c952ff44645d2a50e5566b568c943 100644 (file)
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (ai_usermode & UM_SIGNAL)
                force_sig(SIGBUS, current);
-       else
-               set_cr(cr_no_alignment);
+       else {
+               /*
+                * We're about to disable the alignment trap and return to
+                * user space.  But if an interrupt occurs before actually
+                * reaching user space, then the IRQ vector entry code will
+                * notice that we were still in kernel space and therefore
+                * the alignment trap won't be re-enabled in that case as it
+                * is presumed to be always on from kernel space.
+                * Let's prevent that race by disabling interrupts here (they
+                * are disabled on the way back to user space anyway in
+                * entry-common.S) and disable the alignment trap only if
+                * there is no work pending for this thread.
+                */
+               raw_local_irq_disable();
+               if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+                       set_cr(cr_no_alignment);
+       }
 
        return 0;
 }
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index 6e1c4f6a2b3f3a09ed3f10be9aeafab36c9a0924..6a3a2d0cd6db15806342a7357c2c154b6ab5970d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
-       if (arch_is_coherent() && cpu_is_xsc3())
+       if (arch_is_coherent() && cpu_is_xsc3()) {
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+       }
        /*
         * ARMv6 and above have extended page tables.
         */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
                mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
        }
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
        }
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (!pfn_valid(pfn))
+               return pgprot_noncached(vma_prot);
+       else if (file->f_flags & O_SYNC)
+               return pgprot_writecombine(vma_prot);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
index 6a8506d99ee9abbb0845f39d94d663f802c56885..7563ff0141bd85cee6d4cc626b69f7210141094c 100644 (file)
@@ -186,13 +186,14 @@ cpu_v7_name:
  *     It is assumed that:
  *     - cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
        mrc     p15, 0, r0, c1, c0, 1
        tst     r0, #(1 << 6)                   @ SMP/nAMP mode enabled?
        orreq   r0, r0, #(1 << 6) | (1 << 0)    @ Enable SMP/nAMP mode and
        mcreq   p15, 0, r0, c1, c0, 1           @ TLB ops broadcasting
 #endif
+__v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r10, r0, #0xff000000            @ ARM?
        teq     r10, #0x41000000
-       bne     2f
+       bne     3f
        and     r5, r0, #0x00f00000             @ variant
        and     r6, r0, #0x0000000f             @ revision
-       orr     r0, r6, r5, lsr #20-4           @ combine variant and revision
+       orr     r6, r6, r5, lsr #20-4           @ combine variant and revision
+       ubfx    r0, r0, #4, #12                 @ primary part number
 
+       /* Cortex-A8 Errata */
+       ldr     r10, =0x00000c08                @ Cortex-A8 primary part number
+       teq     r0, r10
+       bne     2f
 #ifdef CONFIG_ARM_ERRATA_430973
        teq     r5, #0x00100000                 @ only present in r1p*
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 0, r10, c1, c0, 1          @ read aux control register
        orreq   r10, r10, #(1 << 5)             @ set L1NEON to 1
        orreq   r10, r10, #(1 << 9)             @ set PLDNOP to 1
        mcreq   p15, 0, r10, c1, c0, 1          @ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-       teq     r0, #0x20                       @ only present in r2p0
+       teq     r6, #0x20                       @ only present in r2p0
        mrceq   p15, 1, r10, c9, c0, 2          @ read L2 cache aux ctrl register
        tsteq   r10, #1 << 22
        orreq   r10, r10, #(1 << 22)            @ set the Write Allocate disable bit
        mcreq   p15, 1, r10, c9, c0, 2          @ write the L2 cache aux ctrl register
 #endif
+       b       3f
+
+       /* Cortex-A9 Errata */
+2:     ldr     r10, =0x00000c09                @ Cortex-A9 primary part number
+       teq     r0, r10
+       bne     3f
+#ifdef CONFIG_ARM_ERRATA_742230
+       cmp     r6, #0x22                       @ only present up to r2p2
+       mrcle   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orrle   r10, r10, #1 << 4               @ set bit #4
+       mcrle   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+       teq     r6, #0x20                       @ present in r2p0
+       teqne   r6, #0x21                       @ present in r2p1
+       teqne   r6, #0x22                       @ present in r2p2
+       mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
+       orreq   r10, r10, #1 << 12              @ set bit #12
+       orreq   r10, r10, #1 << 22              @ set bit #22
+       mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+#endif
 
-2:     mov     r10, #0
+3:     mov     r10, #0
 #ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
 #endif
@@ -323,6 +350,29 @@ cpu_elf_name:
 
        .section ".proc.info.init", #alloc, #execinstr
 
+       .type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+       .long   0x410fc090              @ Required ID value
+       .long   0xff0ffff0              @ Mask for ID
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ | \
+               PMD_FLAGS
+       .long   PMD_TYPE_SECT | \
+               PMD_SECT_XN | \
+               PMD_SECT_AP_WRITE | \
+               PMD_SECT_AP_READ
+       b       __v7_ca9mp_setup
+       .long   cpu_arch_name
+       .long   cpu_elf_name
+       .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+       .long   cpu_v7_name
+       .long   v7_processor_functions
+       .long   v7wbi_tlb_fns
+       .long   v6_user_fns
+       .long   v7_cache_fns
+       .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
        /*
         * Match any ARMv7 processor core.
         */
index 0691176899ffc24f0a176d154a34f5b7200c6047..72e09eb642dd7c7a00d47fce210bb1290e926fd9 100644 (file)
@@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event)
        if (IS_ERR(pevent)) {
                ret = PTR_ERR(pevent);
        } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+               perf_event_release_kernel(pevent);
                pr_warning("oprofile: failed to enable event %d "
                                "on CPU %d\n", event, cpu);
                ret = -EBUSY;
@@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        ret = init_driverfs();
        if (ret) {
                kfree(counter_config);
+               counter_config = NULL;
                return ret;
        }
 
@@ -402,7 +404,6 @@ void oprofile_arch_exit(void)
        struct perf_event *event;
 
        if (*perf_events) {
-               exit_driverfs();
                for_each_possible_cpu(cpu) {
                        for (id = 0; id < perf_num_counters; ++id) {
                                event = perf_events[cpu][id];
@@ -413,8 +414,10 @@ void oprofile_arch_exit(void)
                }
        }
 
-       if (counter_config)
+       if (counter_config) {
                kfree(counter_config);
+               exit_driverfs();
+       }
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index ea3ca86c52836ba1ec9fe780da9c1629db91723f..aedf9c1d645e4a820c8f1f9dd884fdf5fd1cebf5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
                cr = readl(mtu_base + MTU_CR(1));
                writel(0, mtu_base + MTU_LR(1));
                writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-               writel(0x2, mtu_base + MTU_IMSC);
+               writel(1 << 1, mtu_base + MTU_IMSC);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
        unsigned long rate;
        struct clk *clk0;
-       struct clk *clk1;
-       u32 cr;
+       u32 cr = MTU_CRn_32BITS;
 
        clk0 = clk_get_sys("mtu0", NULL);
        BUG_ON(IS_ERR(clk0));
 
-       clk1 = clk_get_sys("mtu1", NULL);
-       BUG_ON(IS_ERR(clk1));
-
        clk_enable(clk0);
-       clk_enable(clk1);
 
        /*
-        * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-        * use a divide-by-16 counter if it's more than 16MHz
+        * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
+        * for ux500.
+        * Use a divide-by-16 counter if the tick rate is more than 32MHz.
+        * At 32 MHz, the timer (with 32 bit counter) can be programmed
+        * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
+        * with 16 gives too low timer resolution.
         */
-       cr = MTU_CRn_32BITS;;
        rate = clk_get_rate(clk0);
-       if (rate > 16 << 20) {
+       if (rate > 32000000) {
                rate /= 16;
                cr |= MTU_CRn_PRESCALE_16;
        } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
                pr_err("timer: failed to initialize clock source %s\n",
                       nmdk_clksrc.name);
 
-       /* Timer 1 is used for events, fix according to rate */
-       cr = MTU_CRn_32BITS;
-       rate = clk_get_rate(clk1);
-       if (rate > 16 << 20) {
-               rate /= 16;
-               cr |= MTU_CRn_PRESCALE_16;
-       } else {
-               cr |= MTU_CRn_PRESCALE_1;
-       }
+       /* Timer 1 is used for events */
+
        clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
        writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
index e39a417a368dc92ad6776a68ef6fefc22a046b76..a92cb499313fdc9583890ebcc182ecae280cdc09 100644 (file)
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
 config OMAP_DEBUG_LEDS
        bool
        depends on OMAP_DEBUG_DEVICES
-       default y if LEDS
+       default y if LEDS_CLASS
 
 config OMAP_RESET_CLOCKS
        bool "Reset unused clocks during boot"
index e31496e35b0f452d4ff9e375855718fc0a078d40..0c8612fd831237164968b1f2120a1134618557e3 100644 (file)
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
                /* Writing zero to RSYNC_ERR clears the IRQ */
                MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
        } else {
-               complete(&mcbsp_rx->tx_irq_completion);
+               complete(&mcbsp_rx->rx_irq_completion);
        }
 
        return IRQ_HANDLED;
index 226b2e858d6c9617243a91138821fb0e355daf56..10b3b4c63372f406e6ee206ce691977753017e56 100644 (file)
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
        if (omap_sram_size == 0)
                return;
 
-       if (cpu_is_omap24xx()) {
-               omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-               base = OMAP2_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-
        if (cpu_is_omap34xx()) {
-               omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-               base = OMAP3_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
                /*
                 * SRAM must be marked as non-cached on OMAP3 since the
                 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
                omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
        }
 
-       if (cpu_is_omap44xx()) {
-               omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-               base = OMAP4_SRAM_PA;
-               base = ROUND_DOWN(base, PAGE_SIZE);
-               omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-       }
-       omap_sram_io_desc[0].length = 1024 * 1024;      /* Use section desc */
+       omap_sram_io_desc[0].virtual = omap_sram_base;
+       base = omap_sram_start;
+       base = ROUND_DOWN(base, PAGE_SIZE);
+       omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+       omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
        iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
        printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index d3f1a9b5d2b5a4d01f040ba784a13185629b5938..608770fc1531335967f4e42a4c9355ed8e5f6c87 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc0_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC0,
-               .end    = S5P_PA_FIMC0 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC0 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
        },
 };
 
+static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc0 = {
        .name           = "s5p-fimc",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(s5p_fimc0_resource),
        .resource       = s5p_fimc0_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc0_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
index 41bd6986d0ad03210cf85c4e32f1f497a29d67f6..76e3a97a87d37c934f95da3ab2187831b9eb6da6 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc1_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC1,
-               .end    = S5P_PA_FIMC1 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC1 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
        },
 };
 
+static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc1 = {
        .name           = "s5p-fimc",
        .id             = 1,
        .num_resources  = ARRAY_SIZE(s5p_fimc1_resource),
        .resource       = s5p_fimc1_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc1_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
index dfddeda6d4a373445abdabbbe3e59155c61fa84e..24d29816fa2c03711ad0b5a62e2561caabd11e3b 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -18,7 +19,7 @@
 static struct resource s5p_fimc2_resource[] = {
        [0] = {
                .start  = S5P_PA_FIMC2,
-               .end    = S5P_PA_FIMC2 + SZ_1M - 1,
+               .end    = S5P_PA_FIMC2 + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
        },
 };
 
+static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
+
 struct platform_device s5p_device_fimc2 = {
        .name           = "s5p-fimc",
        .id             = 2,
        .num_resources  = ARRAY_SIZE(s5p_fimc2_resource),
        .resource       = s5p_fimc2_resource,
+       .dev            = {
+               .dma_mask               = &s5p_fimc2_dma_mask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+       },
 };
index 57b68a50f45e93aa14852500ace0df1fdc48da8b..e3d41eaed1ffd9feed571cef0af6bf5bf0841bf2 100644 (file)
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
        if (!chip)
                return -EINVAL;
 
-       off = chip->chip.base - pin;
+       off = pin - chip->chip.base;
        shift = off * 2;
        reg = chip->base + 0x0C;
 
        drvstr = __raw_readl(reg);
-       drvstr = 0xffff & (0x3 << shift);
        drvstr = drvstr >> shift;
+       drvstr &= 0x3;
 
        return (__force s5p_gpio_drvstr_t)drvstr;
 }
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
        if (!chip)
                return -EINVAL;
 
-       off = chip->chip.base - pin;
+       off = pin - chip->chip.base;
        shift = off * 2;
        reg = chip->base + 0x0C;
 
        tmp = __raw_readl(reg);
+       tmp &= ~(0x3 << shift);
        tmp |= drvstr << shift;
 
        __raw_writel(tmp, reg);
index db4112c6f2becd7371fc86428c5ca4b50a07bbdd..1c6b92947c5db7e8b955ac3831214bbf556bf2bb 100644 (file)
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
 /* Define values for the drvstr available for each gpio pin.
  *
  * These values control the value of the output signal driver strength,
- * configurable on most pins on the S5C series.
+ * configurable on most pins on the S5P series.
  */
-#define S5P_GPIO_DRVSTR_LV1    ((__force s5p_gpio_drvstr_t)0x00)
-#define S5P_GPIO_DRVSTR_LV2    ((__force s5p_gpio_drvstr_t)0x01)
-#define S5P_GPIO_DRVSTR_LV3    ((__force s5p_gpio_drvstr_t)0x10)
-#define S5P_GPIO_DRVSTR_LV4    ((__force s5p_gpio_drvstr_t)0x11)
+#define S5P_GPIO_DRVSTR_LV1    ((__force s5p_gpio_drvstr_t)0x0)
+#define S5P_GPIO_DRVSTR_LV2    ((__force s5p_gpio_drvstr_t)0x2)
+#define S5P_GPIO_DRVSTR_LV3    ((__force s5p_gpio_drvstr_t)0x1)
+#define S5P_GPIO_DRVSTR_LV4    ((__force s5p_gpio_drvstr_t)0x3)
 
 /**
  * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index 98f94d041d9c1dd212a0519efac60def8034b7db..a727f54d64d6e633d58ae2836bbbfae4c82f0017 100644 (file)
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        vfree(module->arch.syminfo);
        module->arch.syminfo = NULL;
 
-       return module_bug_finalize(hdr, sechdrs, module);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *module)
 {
-       module_bug_cleanup(module);
 }
index 0974c0ecc594817ee9d8067aeba0eee7e3379dbf..bab01298b58ee2873fd7ef85de9230ac7af354f7 100644 (file)
@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
        struct user_context *user = current->thread.user;
        unsigned long tbr, psr;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        tbr = user->i.tbr;
        psr = user->i.psr;
        if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
        struct sigframe __user *frame;
        int rsig;
 
+       set_fs(USER_DS);
+
        frame = get_sigframe(ka, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
                                   (unsigned long) (frame->retcode + 2));
        }
 
-       /* set up registers for signal handler */
-       __frame->sp   = (unsigned long) frame;
-       __frame->lr   = (unsigned long) &frame->retcode;
-       __frame->gr8  = sig;
-
+       /* Set up registers for the signal handler */
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-               __get_user(__frame->pc, &funcptr->text);
-               __get_user(__frame->gr15, &funcptr->GOT);
+               struct fdpic_func_descriptor desc;
+               if (copy_from_user(&desc, funcptr, sizeof(desc)))
+                       goto give_sigsegv;
+               __frame->pc = desc.text;
+               __frame->gr15 = desc.GOT;
        } else {
                __frame->pc   = (unsigned long) ka->sa.sa_handler;
                __frame->gr15 = 0;
        }
 
-       set_fs(USER_DS);
+       __frame->sp   = (unsigned long) frame;
+       __frame->lr   = (unsigned long) &frame->retcode;
+       __frame->gr8  = sig;
 
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 
 } /* end setup_frame() */
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        struct rt_sigframe __user *frame;
        int rsig;
 
+       set_fs(USER_DS);
+
        frame = get_sigframe(ka, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        }
 
        /* Set up registers for signal handler */
-       __frame->sp  = (unsigned long) frame;
-       __frame->lr   = (unsigned long) &frame->retcode;
-       __frame->gr8 = sig;
-       __frame->gr9 = (unsigned long) &frame->info;
-
        if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
-               __get_user(__frame->pc, &funcptr->text);
-               __get_user(__frame->gr15, &funcptr->GOT);
+               struct fdpic_func_descriptor desc;
+               if (copy_from_user(&desc, funcptr, sizeof(desc)))
+                       goto give_sigsegv;
+               __frame->pc = desc.text;
+               __frame->gr15 = desc.GOT;
        } else {
                __frame->pc   = (unsigned long) ka->sa.sa_handler;
                __frame->gr15 = 0;
        }
 
-       set_fs(USER_DS);
+       __frame->sp  = (unsigned long) frame;
+       __frame->lr  = (unsigned long) &frame->retcode;
+       __frame->gr8 = sig;
+       __frame->gr9 = (unsigned long) &frame->info;
 
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 
 } /* end setup_rt_frame() */
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        int ret;
 
        /* Are we from a system call? */
-       if (in_syscall(__frame)) {
+       if (__frame->syscallno != -1) {
                /* If so, check system call restarting.. */
                switch (__frame->gr8) {
                case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
                        __frame->gr8 = __frame->orig_gr8;
                        __frame->pc -= 4;
                }
+               __frame->syscallno = -1;
        }
 
        /* Set up the stack frame */
@@ -538,10 +548,11 @@ no_signal:
                        break;
 
                case -ERESTART_RESTARTBLOCK:
-                       __frame->gr8 = __NR_restart_syscall;
+                       __frame->gr7 = __NR_restart_syscall;
                        __frame->pc -= 4;
                        break;
                }
+               __frame->syscallno = -1;
        }
 
        /* if there's no signal to deliver, we just put the saved sigmask
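
The frv hunks above adjust the usual syscall-restart bookkeeping: the restart-block case must load the syscall-number register gr7 (not the return-value register gr8) with __NR_restart_syscall, and the new "__frame->syscallno = -1" assignments make sure a syscall can be restarted at most once. A compact sketch of the no-handler restart path; every type and constant below is an illustrative stand-in, not frv's real headers:

    #include <stdint.h>

    #define ERESTARTSYS           512   /* kernel-internal restart codes */
    #define ERESTARTNOINTR        513
    #define ERESTARTNOHAND        514
    #define ERESTART_RESTARTBLOCK 516
    #define NR_restart_syscall      0   /* placeholder syscall number */

    struct regs {                       /* minimal stand-in for pt_regs */
            long      syscallno;        /* -1 when not inside a syscall */
            long      gr7;              /* syscall-number register      */
            long      gr8;              /* return-value register        */
            long      orig_gr8;         /* gr8 as it was on entry       */
            uintptr_t pc;
    };

    /* Restart handling when a signal was pending but no handler ran. */
    static void restart_if_needed(struct regs *frame)
    {
            if (frame->syscallno == -1)
                    return;                          /* not returning from a syscall */

            switch (frame->gr8) {
            case -ERESTARTNOHAND:
            case -ERESTARTSYS:
            case -ERESTARTNOINTR:
                    frame->gr8 = frame->orig_gr8;    /* undo the error return value  */
                    frame->pc -= 4;                  /* re-execute the 4-byte trap   */
                    break;
            case -ERESTART_RESTARTBLOCK:
                    frame->gr7 = NR_restart_syscall; /* run sys_restart_syscall next */
                    frame->pc -= 4;
                    break;
            }
            frame->syscallno = -1;                   /* the fix above: restart at most once */
    }
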
index 0865e291c20d2948c95edc70f52925121409599d..db4953dc4e1b445adbdd7e004a68890a5c1c07a6 100644 (file)
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index f90edc85b50933a9df52391d041ad9f8b1a8ee66..9301a2821615b66cacd781eeba3e53d57e60fe75 100644 (file)
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
 }
 
 static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
index 3567d54f8cee7533ecba41847c5f9957d9481296..331d42bda77ae97f457b13f970aa83c40e11d4b0 100644 (file)
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33])                   // r14 <- *set
        ;;
 
        RSM_PSR_I(p0, r18, r19)                 // mask interrupt delivery
-       mov ar.ccv=0
        andcm r14=r14,r17                       // filter out SIGKILL & SIGSTOP
+       mov r8=EINVAL                   // default to EINVAL
 
 #ifdef CONFIG_SMP
-       mov r17=1
+       // __ticket_spin_trylock(r31)
+       ld4 r17=[r31]
        ;;
-       cmpxchg4.acq r18=[r31],r17,ar.ccv       // try to acquire the lock
-       mov r8=EINVAL                   // default to EINVAL
+       mov.m ar.ccv=r17
+       extr.u r9=r17,17,15
+       adds r19=1,r17
+       extr.u r18=r17,0,15
+       ;;
+       cmp.eq p6,p7=r9,r18
        ;;
+(p6)   cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6)   dep.z r20=r19,1,15              // next serving ticket for unlock
+(p7)   br.cond.spnt.many .lock_contention
+       ;;
+       cmp4.eq p0,p7=r9,r17
+       adds r31=2,r31
+(p7)   br.cond.spnt.many .lock_contention
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
-       cmp4.ne p6,p0=r18,r0
-(p6)   br.cond.spnt.many .lock_contention
        ;;
 #else
        ld8 r3=[r2]                     // re-read current->blocked now that we hold the lock
-       mov r8=EINVAL                   // default to EINVAL
 #endif
        add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
        add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33])                     // r14 <- *set
 (p6)   br.cond.spnt.few 1b                     // yes -> retry
 
 #ifdef CONFIG_SMP
-       st4.rel [r31]=r0                        // release the lock
+       // __ticket_spin_unlock(r31)
+       st2.rel [r31]=r20
+       mov r20=0                                       // i must not leak kernel bits...
 #endif
        SSM_PSR_I(p0, p9, r31)
        ;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 
 .sig_pending:
 #ifdef CONFIG_SMP
-       st4.rel [r31]=r0                        // release the lock
+       // __ticket_spin_unlock(r31)
+       st2.rel [r31]=r20                       // release the lock
 #endif
        SSM_PSR_I(p0, p9, r17)
        ;;
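
The two hunks above replace the single cmpxchg acquisition in the ia64 sigtramp fast path with an open-coded ticket lock, as the __ticket_spin_trylock()/__ticket_spin_unlock() comments indicate. A rough C model of what the assembly does, with the field layout inferred from the extr.u/dep.z operands (the names and atomics below are illustrative, not the kernel's own helpers):

    #include <stdbool.h>
    #include <stdint.h>

    #define TICKET_SHIFT 17                 /* "now serving" field: bits 17..31 */
    #define TICKET_MASK  ((1u << 15) - 1)   /* both fields are 15 bits wide     */

    struct ticket_lock { uint32_t lock; };

    /* Mirrors the trylock sequence: ld4, extract both fields, and cmpxchg the
     * incremented word back in only if the lock currently looks free. */
    static bool ticket_trylock(struct ticket_lock *l, uint16_t *next_serve)
    {
            uint32_t old    = l->lock;                             /* ld4              */
            uint32_t ticket = old & TICKET_MASK;                   /* extr.u r18,0,15  */
            uint32_t serve  = (old >> TICKET_SHIFT) & TICKET_MASK; /* extr.u r9,17,15  */

            if (ticket != serve)                                   /* already held     */
                    return false;
            if (__sync_val_compare_and_swap(&l->lock, old, old + 1) != old)
                    return false;                                  /* lost the race    */
            /* dep.z r20=r19,1,15: the value the unlock will store later */
            *next_serve = (uint16_t)(((ticket + 1) & TICKET_MASK) << 1);
            return true;
    }

    /* Mirrors "st2.rel [r31]=r20" after "adds r31=2,r31": a release store into
     * the upper halfword of the lock word bumps "now serving" by one. */
    static void ticket_unlock(struct ticket_lock *l, uint16_t next_serve)
    {
            uint16_t *serve_half = (uint16_t *)&l->lock + 1;       /* ia64 is little-endian */
            __atomic_store_n(serve_half, next_serve, __ATOMIC_RELEASE);
    }

Keeping the precomputed "next serving" value in r20 across the critical section is what lets the unlock be a single 2-byte store, and is also why the assembly clears r20 afterwards so the ticket value cannot leak back to userspace.
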
index 9c1acb2b1a928984c8f62ce4a5cd91a25329dc21..b2eeb0de1c8d337a6d7ab5abef363b4797ab7e9a 100644 (file)
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 
 struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
 
 #define ptrace_signal_deliver(regs, cookie)    do { } while (0)
 
index 76125777483ccda07d9d31d55c2f8f3b3201ea05..c70545689da83ef2ffaef987b2c3374fadca04fc 100644 (file)
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 #define __IGNORE_lchown
 #define __IGNORE_setuid
index 403869833b98fe6c95fd360d80cd762f3a5008e2..225412bc227e690bcb313743dd16f58fca5c4115 100644 (file)
@@ -235,10 +235,9 @@ work_resched:
 work_notifysig:                                ; deal with pending signals and
                                        ; notify-resume requests
        mv      r0, sp                  ; arg1 : struct pt_regs *regs
-       ldi     r1, #0                  ; arg2 : sigset_t *oldset
-       mv      r2, r9                  ; arg3 : __u32 thread_info_flags
+       mv      r1, r9                  ; arg2 : __u32 thread_info_flags
        bl      do_notify_resume
-       bra     restore_all
+       bra     resume_userspace
 
        ; perform syscall exit tracing
        ALIGN
index e555091eb97cbcf8bbe261851be3b72e90e6075e..0021ade4cba8c86bf1d2fd348b283d8cac591955 100644 (file)
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
 
        if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
            != sizeof(insn))
-               break;
+               return -EIO;
 
        compute_next_pc(insn, pc, &next_pc, child);
        if (next_pc & 0x80000000)
-               break;
+               return -EIO;
 
        if (embed_debug_trap(child, next_pc))
-               break;
+               return -EIO;
 
        invalidate_cache();
+       return 0;
 }
 
 void user_disable_single_step(struct task_struct *child)
index 144b0f124fc72f08b20f93336f96da81327fe61c..7bbe38645ed5559395f85c5b5fce93eb3f4992e3 100644 (file)
 
 #define DEBUG_SIG 0
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
-                 unsigned long r2, unsigned long r3, unsigned long r4,
-                 unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
-       sigset_t newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
-       return -ERESTARTNOHAND;
-}
-
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        return (void __user *)((sp - frame_size) & -8ul);
 }
 
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                           sigset_t *set, struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                current->comm, current->pid, frame, regs->pc);
 #endif
 
-       return;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
+       return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+       u16 inst;
+       if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+               return -EFAULT;
+       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
+               regs->bpc -= 2;
+       else
+               regs->bpc -= 4;
+       regs->syscall_nr = -1;
+       return 0;
 }
 
 /*
  * OK, we're invoking a handler
  */
 
-static void
+static int
 handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
 {
-       unsigned short inst;
-
        /* Are we from a system call? */
        if (regs->syscall_nr >= 0) {
                /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                        /* fallthrough */
                        case -ERESTARTNOINTR:
                                regs->r0 = regs->orig_r0;
-                               inst = *(unsigned short *)(regs->bpc - 2);
-                               if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                                       regs->bpc -= 2;
-                               else
-                                       regs->bpc -= 4;
+                               if (prev_insn(regs) < 0)
+                                       return -EFAULT;
                }
        }
 
        /* Set up the stack frame */
-       setup_rt_frame(sig, ka, info, oldset, regs);
+       if (setup_rt_frame(sig, ka, info, oldset, regs))
+               return -EFAULT;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+       return 0;
 }
 
 /*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
-       unsigned short inst;
+       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
         * if so.
         */
        if (!user_mode(regs))
-               return 1;
+               return;
 
        if (try_to_freeze()) 
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                 */
 
                /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &ka, &info, oldset, regs);
-               return 1;
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               return;
        }
 
  no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
                    regs->r0 == -ERESTARTSYS ||
                    regs->r0 == -ERESTARTNOINTR) {
                        regs->r0 = regs->orig_r0;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
-               }
-               if (regs->r0 == -ERESTART_RESTARTBLOCK){
+                       prev_insn(regs);
+               } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
                        regs->r0 = regs->orig_r0;
                        regs->r7 = __NR_restart_syscall;
-                       inst = *(unsigned short *)(regs->bpc - 2);
-                       if ((inst & 0xfff0) == 0x10f0)  /* trap ? */
-                               regs->bpc -= 2;
-                       else
-                               regs->bpc -= 4;
+                       prev_insn(regs);
                }
        }
-       return 0;
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 /*
  * notification of userspace execution resumption
  * - triggered by current->work.notify_resume
  */
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-                     __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
 {
        /* Pending single-step? */
        if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
 
        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
+               do_signal(regs);
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
index 60b15d0aa07290bfd2494423ecc4c331c33c3032..b43b36beafe37aed63baf7714a92ba55a7cf2f1c 100644 (file)
 #define __NR_set_thread_area   334
 #define __NR_atomic_cmpxchg_32 335
 #define __NR_atomic_barrier    336
+#define __NR_fanotify_init     337
+#define __NR_fanotify_mark     338
+#define __NR_prlimit64         339
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            337
+#define NR_syscalls            340
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 2391bdff09962e7a51d36c7842290ea90932ca54..6360c437dcf51c5491e192151b95ce6c1a6923a2 100644 (file)
@@ -765,4 +765,7 @@ sys_call_table:
        .long sys_set_thread_area
        .long sys_atomic_cmpxchg_32     /* 335 */
        .long sys_atomic_barrier
+       .long sys_fanotify_init
+       .long sys_fanotify_mark
+       .long sys_prlimit64
 
index 8f0640847ad2bf7bf99d0a184ed10ce8272a84f8..05285d08e54767a71a814773c23a506386f9626a 100644 (file)
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
 void mac_mksound( unsigned int freq, unsigned int length )
 {
        __u32 cfreq = ( freq << 5 ) / 468;
-       __u32 flags;
+       unsigned long flags;
        int i;
 
        if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
  */
 static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
 {
-       __u32 flags;
+       unsigned long flags;
 
        /* if the bell is already ringing, ring longer */
        if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
 static void mac_quadra_ring_bell( unsigned long ignored )
 {
        int     i, count = mac_asc_samplespersec / HZ;
-       __u32 flags;
+       unsigned long flags;
 
        /*
         * we neither want a sound buffer overflow nor underflow, so we need to match
index b30b3eb197a5d2c157d6e96f8d60de45395d4a21..79b1ed198c070dd40dbb3cf4f03dd6f24b79cb71 100644 (file)
@@ -355,6 +355,9 @@ ENTRY(sys_call_table)
        .long sys_set_thread_area
        .long sys_atomic_cmpxchg_32     /* 335 */
        .long sys_atomic_barrier
+       .long sys_fanotify_init
+       .long sys_fanotify_mark
+       .long sys_prlimit64
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index 3ad59dde485209bce858c425e4fc8a2f64b04c09..5526faabfc21433cf3e3fa679c9fcf802646c96c 100644 (file)
@@ -13,6 +13,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select RTC_LIB if !MACH_LOONGSON
+       select GENERIC_ATOMIC64 if !64BIT
 
 mainmenu "Linux/MIPS Kernel Configuration"
 
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP
        select SYS_SUPPORTS_SMP
        select SMP_UP
        help
-         This is a kernel model which is also known a VSMP or lately
-         has been marketesed into SMVP.
+         This is a kernel model also known as VSMP, or more recently
+         marketed as SMVP.
+         Virtual SMP uses the processor's VPEs to implement virtual
+         processors. In the currently available configuration of the 34K
+         processor this allows for a dual processor. Both processors share
+         the same primary caches; each gets half of the TLB for its own
+         exclusive use. For the layman, this model is comparable to what
+         Intel calls Hyperthreading.
+
+         For further information see http://www.linux-mips.org/wiki/34K#VSMP
 
 config MIPS_MT_SMTC
        bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC
        help
          This is a kernel model which is known a SMTC or lately has been
          marketesed into SMVP.
+         It presents the available TCs of the core as processors to Linux.
+         On currently available 34K processors this means a Linux system will
+         see up to 5 processors. The implementation of the SMTC kernel differs
+         significantly from VSMP and cannot efficiently coexist with it in the
+         same kernel binary, so the choice between VSMP and SMTC is a
+         compile-time decision.
+
+         For further information see http://www.linux-mips.org/wiki/34K#SMTC
 
 endchoice
 
index c29511b11d44fd6732b0dd153d009dbd7051555d..5340210596297fa54c8723e866aebaeb8c20269e 100644 (file)
@@ -43,7 +43,7 @@ int prom_argc;
 char **prom_argv;
 char **prom_envp;
 
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
 {
        int i;
 
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
        }
 }
 
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
 {
        char *ethaddr_str;
 
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr)
 
        return 0;
 }
-EXPORT_SYMBOL(prom_get_ethernet_addr);
 
 void __init prom_free_prom_memory(void)
 {
index ed9bb709c9a3816a4738d0ceef91db8c8de20cd4..5fd7f7a58b7eb9b02446e8ea2aa3e2e9b1e48e8f 100644 (file)
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 hostprogs-y := calc_vmlinuz_load_addr
 
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-               $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+               $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
index 094c17e38e163ab691b71bef78710f73848c7645..47323ca452dcbde751536c58c9f613dd1d9d51c8 100644 (file)
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_STATIC
        depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+       def_bool y
+       depends on OCTEON_ETHERNET || PCI
index c664c8cc2b42cb8970b9f57531a03e2998075566..a5b427909b5cac04d28c4da1b099342ee72df4ce 100644 (file)
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;               /* Let default notifier send signals */
 }
 
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
 {
        return cu2_notifier(cnmips_cu2_call, 0);
 }
index 2fd66db6939e0f981c338015127c69788d960843..7f41c5be2190ddca03fc92a00e8f21bd735414f5 100644 (file)
@@ -11,4 +11,4 @@
 
 obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
 
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
index c63c56bfd18461b558e0cba7380d6297fc47115e..47d87da379f947c8a578f2a2b84e7da804f559a8 100644 (file)
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
 #endif /* CONFIG_64BIT */
 
 /*
index 613f6912dfc1d024b2aa6b28fb4215ba8b251552..dbc51065df5b3fe53611ff15e9cc259188ff0dbc 100644 (file)
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = (struct pt_regs *)
                ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
index 2cb2f0c2c4f89342ae5256a79d8082f01af4d319..3532e2c5f098ae46a4a79f7455cd004d699dd4a9 100644 (file)
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
 
 #define cu2_notifier(fn, pri)                                          \
 ({                                                                     \
-       static struct notifier_block fn##_nb __cpuinitdata = {          \
+       static struct notifier_block fn##_nb = {                        \
                .notifier_call = fn,                                    \
                .priority = pri                                         \
        };                                                              \
index 9b9436a4d816bfe7f26a1eb8a7e7cb677507c388..86548da650e765f79db345f4d3964a5d0eb6c0fe 100644 (file)
@@ -321,6 +321,7 @@ struct gic_intrmask_regs {
  */
 struct gic_intr_map {
        unsigned int cpunum;    /* Directed to this CPU */
+#define GIC_UNUSED             0xdead                  /* Dummy data */
        unsigned int pin;       /* Directed to this Pin */
        unsigned int polarity;  /* Polarity : +/-       */
        unsigned int trigtype;  /* Trigger  : Edge/Levl */
index b74caf65482b2e068b86d2763daa81733bc45503..ff9a8b86cb9363c1fb458546c56550fc35a7a3ab 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef __ASM_MACH_TX49XX_KMALLOC_H
 #define __ASM_MACH_TX49XX_KMALLOC_H
 
-#define ARCH_KMALLOC_MINALIGN  L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* __ASM_MACH_TX49XX_KMALLOC_H */
index cea872fc6f5c0d1ae92f00f920f7f991c8b9b806..d11aa02a956a57ca41ff890dbe16bbdd0145db53 100644 (file)
@@ -88,9 +88,6 @@
 
 #define GIC_EXT_INTR(x)                x
 
-/* Dummy data */
-#define X                      0xdead
-
 /* External Interrupts used for IPI */
 #define GIC_IPI_EXT_INTR_RESCHED_VPE0  16
 #define GIC_IPI_EXT_INTR_CALLFNC_VPE0  17
index a16beafcea91dd091f0b491ea572b5902e272a3d..e59cd1ac09c2f82eb8c91af1bdb0b6a520513d89 100644 (file)
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear whether the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS, so we keep this
+ * workaround until GCC 3.x has been retired, after which we can apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
 
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
index 2376f2e06e470a264eeff5115dc692c80e9f37c3..70df9c0d3c5be20e2d7b646460276a44374fc077 100644 (file)
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
 
 /* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK         (0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK         (0x0000ffef &                           \
+                                       ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (0x8000ffff & ~_TIF_SECCOMP)
 
index baa318a59c97f8c2791842df809c1218796c7492..550725b881d5edec666a5b32bbe1200164ac7fd8 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 333)
 #define __NR_accept4                   (__NR_Linux + 334)
 #define __NR_recvmmsg                  (__NR_Linux + 335)
+#define __NR_fanotify_init             (__NR_Linux + 336)
+#define __NR_fanotify_mark             (__NR_Linux + 337)
+#define __NR_prlimit64                 (__NR_Linux + 338)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            335
+#define __NR_Linux_syscalls            338
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                335
+#define __NR_O32_Linux_syscalls                338
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_perf_event_open           (__NR_Linux + 292)
 #define __NR_accept4                   (__NR_Linux + 293)
 #define __NR_recvmmsg                  (__NR_Linux + 294)
+#define __NR_fanotify_init             (__NR_Linux + 295)
+#define __NR_fanotify_mark             (__NR_Linux + 296)
+#define __NR_prlimit64                 (__NR_Linux + 297)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            294
+#define __NR_Linux_syscalls            297
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         294
+#define __NR_64_Linux_syscalls         297
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
 #define __NR_getdents64                        (__NR_Linux + 299)
+#define __NR_fanotify_init             (__NR_Linux + 300)
+#define __NR_fanotify_mark             (__NR_Linux + 301)
+#define __NR_prlimit64                 (__NR_Linux + 302)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            299
+#define __NR_Linux_syscalls            302
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                299
+#define __NR_N32_Linux_syscalls                302
 
 #ifdef __KERNEL__
 
index b181f2f0ea8e71709f331c8ee32a7f6792999106..82ba9f62f49e3b2faa98abc1f6da2fe1e062a4ed 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/io.h>
 #include <asm/gic.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
 #include <asm/irq.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
        int             i;
 
        irq -= _irqbase;
-       pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+       pr_debug("%s(%d) called\n", __func__, irq);
        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -1;
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
        /* Setup specifics */
        for (i = 0; i < mapsize; i++) {
                cpu = intrmap[i].cpunum;
-               if (cpu == X)
+               if (cpu == GIC_UNUSED)
                        continue;
                if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
                        continue;
index 1f4e2fa64140ee8204aed74ecf82eba7bab056be..f4546e97c60db111215495f924aa567595c39581 100644 (file)
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
 
-       /* Userpace events, ignore. */
+       /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;
 
index 80e2ba694babcd0d70bd8266a6be941996e2a8a8..29811f043399588604da9bbc00efd9f0997aa530 100644 (file)
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
                memset(&tz, 0, sizeof(tz));
                if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
                                             (int)&tz, 0, 0)) == 0)
-               ret.retval = tv.tv_sec;
+                       ret.retval = tv.tv_sec;
                break;
 
        case MTSP_SYSCALL_EXIT:
index c2dab140dc98fb1588259063699c7ba09b6f8ed7..6343b4a5b8350cb3a93edea5d75f3154cde48343 100644 (file)
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 {
        return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
 }
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+                                dfd, pathname);
+}
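
The compat wrapper above exists because fanotify_mark()'s 64-bit mask arrives from a 32-bit task split across two registers; merge_64() stitches the halves back together. Conceptually (the helper below is an illustrative stand-in; the real merge_64() also handles endianness and the o32 register-pairing rules):

    #include <stdint.h>

    /* Reassemble a 64-bit syscall argument passed as two 32-bit halves. */
    static uint64_t merge_u64(uint32_t high, uint32_t low)
    {
            return ((uint64_t)high << 32) | low;
    }
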
index 17202bbe843f91172534dd30e41e8a9542e0d762..584415eef8c92ae9168790b4ef824470c2034f73 100644 (file)
@@ -583,7 +583,10 @@ einval:    li      v0, -ENOSYS
        sys     sys_rt_tgsigqueueinfo   4
        sys     sys_perf_event_open     5
        sys     sys_accept4             4
-       sys     sys_recvmmsg            5
+       sys     sys_recvmmsg            5       /* 4335 */
+       sys     sys_fanotify_init       2
+       sys     sys_fanotify_mark       6
+       sys     sys_prlimit64           4
        .endm
 
        /* We pre-compute the number of _instruction_ bytes needed to
index a8a6c596eb0405bab886e8dfff6ffeb8097d7fc6..5573f8e4e326910b3873007c2b216fb31fe566d4 100644 (file)
@@ -416,9 +416,12 @@ sys_call_table:
        PTR     sys_pipe2
        PTR     sys_inotify_init1
        PTR     sys_preadv
-       PTR     sys_pwritev                     /* 5390 */
+       PTR     sys_pwritev                     /* 5290 */
        PTR     sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     sys_recvmmsg
+       PTR     sys_recvmmsg
+       PTR     sys_fanotify_init               /* 5295 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index a3d66137731ac24386972c82426a4679d9ff9be3..1e38ec97672ed82d30b238ad7bbd6adbb2c10b9f 100644 (file)
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
-       PTR     sys_getdents
+       PTR     sys_getdents64
+       PTR     sys_fanotify_init               /* 6300 */
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
        .size   sysn32_call_table,.-sysn32_call_table
index 813689ef23847c6a2db230ebd6dcae60e0ff79f6..171979fc98e59a5169049d44001ad75383268684 100644 (file)
@@ -538,5 +538,8 @@ sys_call_table:
        PTR     compat_sys_rt_tgsigqueueinfo
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     compat_sys_recvmmsg
+       PTR     compat_sys_recvmmsg             /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_32_fanotify_mark
+       PTR     sys_prlimit64
        .size   sys_call_table,.-sys_call_table
index 7ba890860d98cb3916c84f369e3fef0200b07f2b..469d4019f795bd072b0aa4ba109d075b5377f55c 100644 (file)
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
+       gfp_t dma_flag;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
        if (dev == NULL)
-               gfp |= __GFP_DMA;
-       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
-               gfp |= __GFP_DMA;
+               dma_flag = __GFP_DMA;
        else
 #endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-               gfp |= __GFP_DMA32;
+                       dma_flag = __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+                       dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA32;
+       else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+               dma_flag = __GFP_DMA;
        else
 #endif
-               ;
+               dma_flag = 0;
 
        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;
 
-       return gfp;
+       return gfp | dma_flag;
 }
 
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
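
Stripped of the #ifdef combinations, the zone selection that the rewritten massage_gfp_flags() performs when both ZONE_DMA and ZONE_DMA32 are configured reduces to two mask comparisons. A minimal sketch (the DMA_BIT_MASK stand-in and the enum are illustrative, not the kernel's definitions):

    #include <stdint.h>

    /* Stand-in for the kernel's DMA_BIT_MASK(); illustrative only. */
    #define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : ((1ULL << (n)) - 1))

    enum zone_choice { NO_ZONE_LIMIT, WANT_GFP_DMA, WANT_GFP_DMA32 };

    /* Zone modifier chosen above when both ZONE_DMA and ZONE_DMA32 exist;
     * the other #ifdef branches simply drop the cases whose zone is not
     * configured, and CONFIG_ISA forces GFP_DMA for a NULL device. */
    static enum zone_choice pick_zone(uint64_t coherent_dma_mask)
    {
            if (coherent_dma_mask < DMA_BIT_MASK(32))
                    return WANT_GFP_DMA;     /* device cannot reach ZONE_DMA32 memory */
            if (coherent_dma_mask < DMA_BIT_MASK(64))
                    return WANT_GFP_DMA32;   /* 32-bit-only device */
            return NO_ZONE_LIMIT;            /* full 64-bit mask: no restriction */
    }

The visible behavioural change is that a device whose coherent mask is exactly 32 bits now gets __GFP_DMA32 instead of an unrestricted allocation, and exactly one zone modifier is ever OR-ed into the flags.
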
index 1ef75cd80a0d819827f5057549fdd2622442a752..274af3be1442b42fa41d3cb960b598ddcbf5b8c2 100644 (file)
@@ -30,7 +30,7 @@
 #define tc_lsize       32
 
 extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
 
 #include <asm/r4kcache.h>
 
index 15949b0be811f9718af9e2896d4bd9e947c84897..b79b24afe3a2fc67ab6687a082e44d45bf219242 100644 (file)
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
  */
 
 #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        { X, X,            X,           X,              0 },
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
        /* The remainder of this table is initialised by fill_ipi_map */
 };
+#undef X
 
 /*
  * GCMP needs to be detected before any SMP initialisation
index 71f7d27b0d4cccf28dba3777b0a8848de53f82c4..f31218e17d3c1437f4f8ef15a7d1855ddc6a32fb 100644 (file)
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
        if (!((pcicvalue == PCIM_H_EA) ||
              (pcicvalue == PCIM_H_IA_FIX) ||
              (pcicvalue == PCIM_H_IA_RR))) {
-               pr_err(KERN_ERR "PCI init error!!!\n");
+               pr_err("PCI init error!!!\n");
                /* Not in Host Mode, return ERROR */
                return -1;
        }
index fadd8744a6bccfbf25283369608c66b16afeab04..e7a12ff304b9475c0db2e6097989c510b288a3c4 100644 (file)
  */
 #include <linux/kernel.h>
 
+#include <asm/processor.h>
 #include <asm/reboot.h>
 #include <glb.h>
 
 void pnx8550_machine_restart(char *command)
 {
-       char head[] = "************* Machine restart *************";
-       char foot[] = "*******************************************";
-
-       printk("\n\n");
-       printk("%s\n", head);
-       if (command != NULL)
-               printk("* %s\n", command);
-       printk("%s\n", foot);
-
        PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
 }
 
 void pnx8550_machine_halt(void)
 {
-       printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
-       printk("*** Machine power off.  (Not implemented) ***\n");
+       while (1) {
+               if (cpu_wait)
+                       cpu_wait();
+       }
 }
index 64246c9c875c51d09e5c3861ca0e6f1096d50ac5..43cb3945fdbfffb8b355789e237a9b27df21abef 100644 (file)
@@ -44,7 +44,6 @@
 extern void __init board_setup(void);
 extern void pnx8550_machine_restart(char *);
 extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
 extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void)
 
         _machine_restart = pnx8550_machine_restart;
         _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_power_off;
+        pm_power_off = pnx8550_machine_halt;
 
        /* Clear the Global 2 Register, PCI Inta Output Enable Registers
           Bit 1:Enable DAC Powerdown
index 444b9f918fdf8f2d5dec64b9827d8d746bb1ade1..7c2a2f7f8dc143889b74605d2741f5707ad330e4 100644 (file)
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
        def_bool y
        select HAVE_OPROFILE
-       select HAVE_ARCH_TRACEHOOK
 
 config AM33
        def_bool y
index ff80e86b9bd2d2305d34a731685d94fe5d3f3491..ce83c74b3fd714abf68fca4a4d2beef024545c83 100644 (file)
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
 
 choice
        prompt "GDB stub port"
-       default GDBSTUB_TTYSM0
+       default GDBSTUB_ON_TTYSM0
        depends on GDBSTUB
        help
          Select the serial port used for GDB-stub.
index f49ac49e09adc079adaabdd9893258ae9b795b7f..3f50e966107641f21f346a38e50cca97d2eda24b 100644 (file)
@@ -229,9 +229,9 @@ int ffs(int x)
 #include <asm-generic/bitops/hweight.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr) \
-       test_and_set_bit((nr) ^ 0x18, (addr))
+       test_and_set_bit((nr), (addr))
 #define ext2_clear_bit_atomic(lock, nr, addr) \
-       test_and_clear_bit((nr) ^ 0x18, (addr))
+       test_and_clear_bit((nr), (addr))
 
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/minix-le.h>
index 7e891fce2370028acea4a56497aafa74c443c0be..1865d72a86ff7cc6823a7be07dbbb0f3907e2518 100644 (file)
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN       32
-#define SIGRTMAX       (_NSIG-1)
+#define SIGRTMAX       _NSIG
 
 /*
  * SA_FLAGS values:
index 9d49073e827a26429335b54cc51cff3304433ac7..db509dd80565b9e91c2b661c08c5fd64bc52e8d4 100644 (file)
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
        ._intr          = &SC0ICR,
        ._rxb           = &SC0RXB,
        ._txb           = &SC0TXB,
-       .rx_name        = "ttySM0/Rx",
-       .tx_name        = "ttySM0/Tx",
+       .rx_name        = "ttySM0:Rx",
+       .tx_name        = "ttySM0:Tx",
 #ifdef CONFIG_MN10300_TTYSM0_TIMER8
-       .tm_name        = "ttySM0/Timer8",
+       .tm_name        = "ttySM0:Timer8",
        ._tmxmd         = &TM8MD,
        ._tmxbr         = &TM8BR,
        ._tmicr         = &TM8ICR,
        .tm_irq         = TM8IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM0_TIMER2 */
-       .tm_name        = "ttySM0/Timer2",
+       .tm_name        = "ttySM0:Timer2",
        ._tmxmd         = &TM2MD,
        ._tmxbr         = (volatile u16 *) &TM2BR,
        ._tmicr         = &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
        ._intr          = &SC1ICR,
        ._rxb           = &SC1RXB,
        ._txb           = &SC1TXB,
-       .rx_name        = "ttySM1/Rx",
-       .tx_name        = "ttySM1/Tx",
+       .rx_name        = "ttySM1:Rx",
+       .tx_name        = "ttySM1:Tx",
 #ifdef CONFIG_MN10300_TTYSM1_TIMER9
-       .tm_name        = "ttySM1/Timer9",
+       .tm_name        = "ttySM1:Timer9",
        ._tmxmd         = &TM9MD,
        ._tmxbr         = &TM9BR,
        ._tmicr         = &TM9ICR,
        .tm_irq         = TM9IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM1_TIMER3 */
-       .tm_name        = "ttySM1/Timer3",
+       .tm_name        = "ttySM1:Timer3",
        ._tmxmd         = &TM3MD,
        ._tmxbr         = (volatile u16 *) &TM3BR,
        ._tmicr         = &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
        .uart.lock      =
        __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
        .name           = "ttySM2",
-       .rx_name        = "ttySM2/Rx",
-       .tx_name        = "ttySM2/Tx",
-       .tm_name        = "ttySM2/Timer10",
+       .rx_name        = "ttySM2:Rx",
+       .tx_name        = "ttySM2:Tx",
+       .tm_name        = "ttySM2:Timer10",
        ._iobase        = &SC2CTR,
        ._control       = &SC2CTR,
        ._status        = &SC2STR,
index 6aea7fd76993b931f31f2dda76e72aecc1e31b4d..196a111e2e2937b134217356991c0ca2f68bda05 100644 (file)
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 /*
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr,
  */
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 717db14c2cc32d8905a45e6cfae4bf2a00d38abd..d4de05ab786464cd585e7f1f0e6ed1652ad3de1c 100644 (file)
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
                old_sigset_t mask;
                if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
                        return -EFAULT;
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
        if (!ret && oact) {
                if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }
 
        return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 {
        unsigned int err = 0;
 
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
        if (is_using_fpu(current))
                fpu_kill_state(current);
 
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        regs->d0 = sig;
        regs->d1 = (unsigned long) &frame->sc;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->d0 = sig;
        regs->d1 = (long) &frame->info;
 
-       set_fs(USER_DS);
-
        /* the tracer may want to single-step inside the handler */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 
 give_sigsegv:
-       force_sig(SIGSEGV, current);
+       force_sigsegv(sig, current);
        return -EFAULT;
 }
 
+static inline void stepback(struct pt_regs *regs)
+{
+       regs->pc -= 2;
+       regs->orig_d0 = -1;
+}
+
 /*
  * handle the actual delivery of a signal to userspace
  */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
                        /* fallthrough */
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                }
        }
 
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        regs->d0 = regs->orig_d0;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
 
                case -ERESTART_RESTARTBLOCK:
                        regs->d0 = __NR_restart_syscall;
-                       regs->pc -= 2;
+                       stepback(regs);
                        break;
                }
        }
index 28b9d983db0cb280c07dcd76921d695892e11c48..1557277fbc5c03962c56f39b7d1a5687bdea80bd 100644 (file)
@@ -2,13 +2,11 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cacheflush-y   := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
-       misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y  += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y  += cache-flush-mn10300.o
-endif
-endif
+       misalignment.o dma-alloc.o $(cacheflush-y)
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644 (file)
index 0000000..f669ea4
--- /dev/null
@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+       if (end < start)
+               return -EINVAL;
+       return 0;
+}
index 1b76719ec1c37b1686a648cd07f8c5e7baaf9ce1..9261217e8d2c5741bb500b829bbd7663859b5541 100644 (file)
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page);
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, off;
+       unsigned long addr, size, base, off;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ppte, pte;
 
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               return;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses directly */
+               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_dcache_flush_range(base, end);
+               if (base == start)
+                       goto invalidate;
+               end = base;
+       }
+
        for (; start < end; start += size) {
                /* work out how much of the page to flush */
                off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 #endif
 
+invalidate:
        mn10300_icache_inv();
 }
 EXPORT_SYMBOL(flush_icache_range);
index 02b77baa5da69f04729013cd9b6402ffc2dc3c39..efa0b60c63fe683f22629ddd540ed036495cd896 100644 (file)
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = &current->thread.regs;
        return (void __user *)regs->gr[30];
index 159a2b81e90c630db82eb9834c2096df7eb66896..6e81bb596e5b476e598e4a7309e4aba80ba0a322 100644 (file)
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr,
        nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
        DEBUGP("NEW num_symtab %lu\n", nsyms);
        symhdr->sh_size = nsyms * sizeof(Elf_Sym);
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        deregister_unwind_table(mod);
-       module_bug_cleanup(mod);
 }
index 396d21a800587f0c8a740714a3277a743d2618cc..a11d4eac4f97f369f48866a7475696879b8be5d3 100644 (file)
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current->thread.regs;
        unsigned long usp = regs->gpr[1];
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index 477c663e014043a5c08fbaf51e82853005391344..49cee9df225be8bfc6b06a429ee9243d10484439 100644 (file)
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                const Elf_Shdr *sechdrs, struct module *me)
 {
        const Elf_Shdr *sect;
-       int err;
-
-       err = module_bug_finalize(hdr, sechdrs, me);
-       if (err)
-               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 7109f5b1baa87bd63e36aa18910c7d0bdfa40b12..2300426e531a096239b0620f9ec29f71e70b6daf 100644 (file)
@@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
                        ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
                        sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
                }
+               regs->trap = 0;
                return 0;               /* no signals delivered */
        }
 
@@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
                ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
        }
 
+       regs->trap = 0;
        if (ret) {
                spin_lock_irq(&current->sighand->siglock);
                sigorsets(&current->blocked, &current->blocked,
index 266610119f664970c66b72832ebb571eb842b1bc..b96a3a010c26859ab93f8cca5ec74cd4905dd16b 100644 (file)
@@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs,
        if (!sig)
                save_r2 = (unsigned int)regs->gpr[2];
        err = restore_general_regs(regs, sr);
+       regs->trap = 0;
        err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
        if (!sig)
                regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
-       regs->trap = 0;
        return 1;
 
 badframe:
@@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
-       regs->trap = 0;
 
        return 1;
 
index 2fe6fc64b614ef9d1b935a472487eaf161967fd9..27c4a4584f805b83fbcf09c5478e7ec9fa7f5e7c 100644 (file)
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
        err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
        err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
        /* skip SOFTE */
-       err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+       regs->trap = 0;
        err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
index 5b243bd3eb3b699ee6a0712340c9df51db2ed948..3dc2a8d262b8731aa4995b6c4b20621f06742fef 100644 (file)
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
        int id_match = 0;
 
        if (dev == NULL || id == NULL)
-               return NULL;
+               return clk;
 
        mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
index 45c0cb9b67e6774958c621b6e3c8055d43e46163..18c10482019811fd4fa25cbf1b0972ef87d4d0c7 100644 (file)
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
        if (bus_range == NULL || len < 2 * sizeof(int)) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't get bus-range for %s\n", pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
        printk(" controlled by %s\n", pcictrl->full_name);
        printk("\n");
 
-       hose = pcibios_alloc_controller(of_node_get(pcictrl));
+       hose = pcibios_alloc_controller(pcictrl);
        if (!hose) {
                printk(KERN_WARNING EFIKA_PLATFORM_NAME
                       ": Can't allocate PCI controller structure for %s\n",
                       pcictrl->full_name);
-               return;
+               goto out_put;
        }
 
        hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
        hose->ops = &rtas_pci_ops;
 
        pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+       return;
+out_put:
+       of_node_put(pcictrl);
 }
 
 #else
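
Note: the efika_pcisetup() hunk above turns bare early returns into a single out_put label so the device-node reference is always dropped on failure. A minimal stand-alone sketch of that cleanup pattern follows; the toy node_get()/node_put() helpers are stand-ins for the real of_node_get()/of_node_put() API, not kernel code.

    #include <stdio.h>

    /* Toy reference counter standing in for the device-node refcount. */
    struct node { int refcount; };

    static void node_get(struct node *n) { n->refcount++; }
    static void node_put(struct node *n) { n->refcount--; }

    /* Same shape as the fixed efika_pcisetup(): every failure path funnels
     * through one label that releases the reference taken at the top.
     */
    static int setup(struct node *n, int have_bus_range, int have_hose)
    {
            node_get(n);

            if (!have_bus_range)
                    goto out_put;       /* used to be a bare return -> leak */
            if (!have_hose)
                    goto out_put;

            return 0;                   /* success: reference is kept */
    out_put:
            node_put(n);
            return -1;
    }

    int main(void)
    {
            struct node n = { 0 };

            setup(&n, 0, 1);
            printf("refcount after failed setup: %d\n", n.refcount);  /* 0 */
            return 0;
    }
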
index 6e905314ad5d66035daf38a514adbfaa2aa60dfa..41f3a7eda1def670c1c12864de488788fc27ff98 100644 (file)
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
        clrbits32(&simple_gpio->simple_dvo, sync | out);
        clrbits8(&wkup_gpio->wkup_dvo, reset);
 
-       /* wait at lease 1 us */
-       udelay(2);
+       /* wait for 1 us */
+       udelay(1);
 
        /* Deassert reset */
        setbits8(&wkup_gpio->wkup_dvo, reset);
 
+       /* wait at least 200ns */
+       /* 7 ~= (200ns * timebase) / ns2sec */
+       __delay(7);
+
        /* Restore pin-muxing */
        out_be32(&simple_gpio->port_config, mux);
 
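
Note: the AC97 reset hunk above adds a __delay(7) justified by "7 ~= (200ns * timebase) / ns2sec". A worked version of that arithmetic, assuming (this is an assumption, not stated in the patch) a timebase of roughly 33 MHz:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed timebase frequency for the sketch; the real value
             * depends on the board's bus clock.
             */
            const double timebase_hz = 33e6;
            const double delay_s = 200e-9;          /* 200 ns */

            /* ticks = delay * timebase = 200e-9 * 33e6 = 6.6 -> round up to 7 */
            printf("ticks = %.1f\n", delay_s * timebase_hz);
            return 0;
    }
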
index 104f2007f097720b339e469d37b7732c120c8168..a875c2f542e1070a120b484c96a0d2fc10fc55cb 100644 (file)
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
 
 #endif
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        unsigned long stack;
 
index 22cfd634c35531b7f8a1d4057f92b4af72e20d1a..f7167ee4604cf7033e30eb3845aa8f9ba9fd538b 100644 (file)
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        vfree(me->arch.syminfo);
        me->arch.syminfo = NULL;
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
 }
index 43adddfe4c04b6d2eee9acfa8cad99a7788dfcc4..ae0be697a89e4b220f527a2bdb34f4da7ec7d318 100644 (file)
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr,
        int ret = 0;
 
        ret |= module_dwarf_finalize(hdr, sechdrs, me);
-       ret |= module_bug_finalize(hdr, sechdrs, me);
 
        return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-       module_bug_cleanup(mod);
        module_dwarf_cleanup(mod);
 }
index 5016f76ea98a6f38510d3dc5a8593a1d871d4f53..6f57325bb883c5553b4781691cd699952bb90ff8 100644 (file)
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = current_thread_info()->kregs;
        unsigned long usp = regs->u_regs[UREG_I6];
index 357ced3c33ffac87a992e01b6820a77084cfb8de..6318e622cfb065d9da81e40b766c12dc0a6e2e7b 100644 (file)
@@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;
 
+       pmap = NULL;
        if (attr->type == PERF_TYPE_HARDWARE) {
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
@@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event)
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
-       } else
+       } else if (attr->type != PERF_TYPE_RAW)
                return -EOPNOTSUPP;
 
+       if (pmap) {
+               hwc->event_base = perf_event_encode(pmap);
+       } else {
+               /* User gives us "(encoding << 16) | pic_mask" for
+                * PERF_TYPE_RAW events.
+                */
+               hwc->event_base = attr->config;
+       }
+
        /* We save the enable bits in the config_base.  */
        hwc->config_base = sparc_pmu->irq_bit;
        if (!attr->exclude_user)
@@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;
 
-       hwc->event_base = perf_event_encode(pmap);
-
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
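
Note: the perf_event hunk above lets PERF_TYPE_RAW events pass attr->config straight through as "(encoding << 16) | pic_mask", while hardware and cache events keep using perf_event_encode(). A hedged user-space sketch of composing such a raw value; the helper name is illustrative, not kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a raw SPARC PMU event the way the comment in the hunk above
     * describes: encoding in the high half, PIC counter mask in the low
     * half.  Purely illustrative; the real consumer is perf_event_open().
     */
    static uint64_t sparc_raw_event(uint16_t encoding, uint16_t pic_mask)
    {
            return ((uint64_t)encoding << 16) | pic_mask;
    }

    int main(void)
    {
            uint64_t config = sparc_raw_event(0x12, 0x3);

            printf("attr.config = 0x%llx\n", (unsigned long long)config);
            return 0;
    }
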
index ea22cd373c64f4bc371478d2b7be14f71cd32560..75fad425e249bc40559f98d14ead5699839bbbb8 100644 (file)
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                         int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+       unsigned long pstate, paddr;
+       pte_t *ptep, pte;
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+
+       /* Commit all stores of the instructions we are about to flush.  */
+       wmb();
+
+       /* Disable cross-call reception.  In this way even a very wide
+        * munmap() on another cpu can't tear down the page table
+        * hierarchy from underneath us, since that can't complete
+        * until the IPI tlb flush returns.
+        */
+
+       __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+       __asm__ __volatile__("wrpr %0, %1, %%pstate"
+                               : : "r" (pstate), "i" (PSTATE_IE));
+
+       pgdp = pgd_offset(current->mm, address);
+       if (pgd_none(*pgdp))
+               goto out_irqs_on;
+       pudp = pud_offset(pgdp, address);
+       if (pud_none(*pudp))
+               goto out_irqs_on;
+       pmdp = pmd_offset(pudp, address);
+       if (pmd_none(*pmdp))
+               goto out_irqs_on;
+
+       ptep = pte_offset_map(pmdp, address);
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto out_unmap;
+
+       paddr = (unsigned long) page_address(pte_page(pte));
+
+       __asm__ __volatile__("flush     %0 + %1"
+                            : /* no outputs */
+                            : "r" (paddr),
+                              "r" (address & (PAGE_SIZE - 1))
+                            : "memory");
+
+out_unmap:
+       pte_unmap(ptep);
+out_irqs_on:
+       __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                        int signo, sigset_t *oldset)
 {
        struct signal_frame32 __user *sf;
        int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer) {
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        } else {
-               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
-               pgd_t *pgdp = pgd_offset(current->mm, address);
-               pud_t *pudp = pud_offset(pgdp, address);
-               pmd_t *pmdp = pmd_offset(pudp, address);
-               pte_t *ptep;
-               pte_t pte;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               preempt_disable();
-               ptep = pte_offset_map(pmdp, address);
-               pte = *ptep;
-               if (pte_present(pte)) {
-                       unsigned long page = (unsigned long)
-                               page_address(pte_page(pte));
-
-                       wmb();
-                       __asm__ __volatile__("flush     %0 + %1"
-                                            : /* no outputs */
-                                            : "r" (page),
-                                              "r" (address & (PAGE_SIZE - 1))
-                                            : "memory");
-               }
-               pte_unmap(ptep);
-               preempt_enable();
+               flush_signal_insns(address);
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                            unsigned long signr, sigset_t *oldset,
-                            siginfo_t *info)
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                           unsigned long signr, sigset_t *oldset,
+                           siginfo_t *info)
 {
        struct rt_signal_frame32 __user *sf;
        int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        if (ka->ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        else {
-               /* Flush instruction space. */
                unsigned long address = ((unsigned long)&(sf->insns[0]));
-               pgd_t *pgdp = pgd_offset(current->mm, address);
-               pud_t *pudp = pud_offset(pgdp, address);
-               pmd_t *pmdp = pmd_offset(pudp, address);
-               pte_t *ptep;
 
                regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
        
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
                if (err)
                        goto sigsegv;
 
-               preempt_disable();
-               ptep = pte_offset_map(pmdp, address);
-               if (pte_present(*ptep)) {
-                       unsigned long page = (unsigned long)
-                               page_address(pte_page(*ptep));
-
-                       wmb();
-                       __asm__ __volatile__("flush     %0 + %1"
-                                            : /* no outputs */
-                                            : "r" (page),
-                                              "r" (address & (PAGE_SIZE - 1))
-                                            : "memory");
-               }
-               pte_unmap(ptep);
-               preempt_enable();
+               flush_signal_insns(address);
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signr, current);
+       return -EFAULT;
 }
 
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
-                                  siginfo_t *info,
-                                  sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+                                 siginfo_t *info,
+                                 sigset_t *oldset, struct pt_regs *regs)
 {
+       int err;
+
        if (ka->sa.sa_flags & SA_SIGINFO)
-               setup_rt_frame32(ka, regs, signr, oldset, info);
+               err = setup_rt_frame32(ka, regs, signr, oldset, info);
        else
-               setup_frame32(ka, regs, signr, oldset);
+               err = setup_frame32(ka, regs, signr, oldset);
+
+       if (err)
+               return err;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart32(orig_i0, regs, &ka.sa);
-               handle_signal32(signr, &ka, &info, oldset, regs);
-
-               /* A signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TS_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+                       /* A signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TS_RESTORE_SIGMASK flag.
+                        */
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               }
                return;
        }
        if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
index 9882df92ba0a2c8b8da4639f7e181214930c8ed6..5e5c5fd03783c997f5c344025e8f4784182a0ddc 100644 (file)
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                       int signo, sigset_t *oldset)
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                      int signo, sigset_t *oldset)
 {
        struct signal_frame __user *sf;
        int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return;
+       return 0;
 
 sigill_and_return:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                          int signo, sigset_t *oldset, siginfo_t *info)
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                         int signo, sigset_t *oldset, siginfo_t *info)
 {
        struct rt_signal_frame __user *sf;
        int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static inline void
+static inline int
 handle_signal(unsigned long signr, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
+       int err;
+
        if (ka->sa.sa_flags & SA_SIGINFO)
-               setup_rt_frame(ka, regs, signr, oldset, info);
+               err = setup_rt_frame(ka, regs, signr, oldset, info);
        else
-               setup_frame(ka, regs, signr, oldset);
+               err = setup_frame(ka, regs, signr, oldset);
+
+       if (err)
+               return err;
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
                sigaddset(&current->blocked, signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               handle_signal(signr, &ka, &info, oldset, regs);
-
-               /* a signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TIF_RESTORE_SIGMASK flag.
-                */
-               if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag.
+                        */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               }
                return;
        }
        if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->pc -= 4;
                regs->npc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* if there's no signal to deliver, we just put the saved sigmask
index 9fa48c30037e5356c2f686be695ea8bcfb3613f3..006fe4515886dc6ae2a7a8e6cc9b6df9c16fda46 100644 (file)
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
        return (void __user *) sp;
 }
 
-static inline void
+static inline int
 setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
               int signo, sigset_t *oldset, siginfo_t *info)
 {
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
        }
        /* 4. return to kernel instructions */
        regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
-       return;
+       return 0;
 
 sigill:
        do_exit(SIGILL);
+       return -EINVAL;
+
 sigsegv:
        force_sigsegv(signo, current);
+       return -EFAULT;
 }
 
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
-                                siginfo_t *info,
-                                sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+                               siginfo_t *info,
+                               sigset_t *oldset, struct pt_regs *regs)
 {
-       setup_rt_frame(ka, regs, signr, oldset,
-                      (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+       int err;
+
+       err = setup_rt_frame(ka, regs, signr, oldset,
+                            (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+       if (err)
+               return err;
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NOMASK))
                sigaddset(&current->blocked,signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(signr, info, ka, regs, 0);
+
+       return 0;
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        if (signr > 0) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
-               handle_signal(signr, &ka, &info, oldset, regs);
-
-               /* A signal was successfully delivered; the saved
-                * sigmask will have been stored in the signal frame,
-                * and will be restored by sigreturn, so we can simply
-                * clear the TS_RESTORE_SIGMASK flag.
-                */
-               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
-               tracehook_signal_handler(signr, &info, &ka, regs, 0);
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+                       /* A signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TS_RESTORE_SIGMASK flag.
+                        */
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+               }
                return;
        }
        if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                regs->u_regs[UREG_I0] = orig_i0;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
        if (restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->tpc -= 4;
                regs->tnpc -= 4;
+               pt_regs_clear_syscall(regs);
        }
 
        /* If there's no signal to deliver, we just put the saved sigmask
index 50794137d710d71bfa197cbb055d14377f2cd770..675c9e11ada5541085e2fd6e6129272c05713df4 100644 (file)
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
 {
        siginfo_t info;
 
-       lock_kernel();
 #ifdef DEBUG_SPARC_BREAKPOINT
         printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
 #endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
 #ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
 #endif
-       unlock_kernel();
 }
 
 asmlinkage int
index f8514e291e1559ecda6994ff5be883cc6805128c..12b9f352595f44e26c3f5730d3a84e8557d8cca8 100644 (file)
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        enum direction dir;
 
-       lock_kernel();
        if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
           (((insn >> 30) & 3) != 3))
                goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 kill_user:
        user_mna_trap_fault(regs, insn);
 out:
-       unlock_kernel();
+       ;
 }
index f24d298bda29d1ca0457b0b7060a366ace463756..b351770cbdd6aded05ca53554b30c8716a24a4ce 100644 (file)
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
        struct thread_info *tp = current_thread_info();
        int window;
 
-       lock_kernel();
        flush_user_windows();
        for(window = 0; window < tp->w_saved; window++) {
                unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
                        do_exit(SIGILL);
        }
        tp->w_saved = 0;
-       unlock_kernel();
 }
index 1246573be59ee78b872ed5d8fa5d583cf16860ac..261aaba092d4bb424d336caa645b222a1fdb1068 100644 (file)
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 0
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 
index e864c47fc89cce6cd7c366784e8d76d22a43e280..70017699a74ce0e37e30d0c7c8e62865e9577e3b 100644 (file)
 /** Is the PROC_STATUS SPR supported? */
 #define CHIP_HAS_PROC_STATUS_SPR() 1
 
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
 /** Log of the number of mshims we have. */
 #define CHIP_LOG_NUM_MSHIMS() 2
 
index 5a34da6cdd79a18802d437056393c41d6e2f6fba..8b60ec8b2d194f6e352df18eba598e3a61ef52f3 100644 (file)
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
        return (long)(int)(long __force)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
 struct compat_sigaction;
 struct compat_siginfo;
 struct compat_sigaltstack;
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                      compat_uptr_t __user *envp);
+long compat_sys_execve(const char __user *path,
+                      const compat_uptr_t __user *argv,
+                      const compat_uptr_t __user *envp);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize);
index 8c95bef3fa45a185a1ec5610bc7a1c21e6f96c13..ee43328713abf2ace9f76e885e9e26ce7b615b3a 100644 (file)
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void *memcpy_fromio(void *dst, void *src, int len)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+                                size_t len)
 {
        int x;
        BUG_ON((unsigned long)src & 0x3);
        for (x = 0; x < len; x += 4)
                *(u32 *)(dst + x) = readl(src + x);
-       return dst;
 }
 
-static inline void *memcpy_toio(void *dst, void *src, int len)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+                               size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
        for (x = 0; x < len; x += 4)
                writel(*(u32 *)(src + x), dst + x);
-       return dst;
 }
 
 /*
index d942d09b252e4012d251d6d09eaa1af6c7df7c3f..ccd5f84256886c7526edd78bc58ab621f14ef2ac 100644 (file)
@@ -103,6 +103,18 @@ struct thread_struct {
        /* Any other miscellaneous processor state bits */
        unsigned long proc_status;
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       /* Interrupt base for PL0 interrupts */
+       unsigned long interrupt_vector_base;
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+       /* Tile cache retry fifo high-water mark */
+       unsigned long tile_rtf_hwm;
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       /* Data stream prefetch control */
+       unsigned long dstream_pf;
+#endif
 #ifdef CONFIG_HARDWALL
        /* Is this task tied to an activated hardwall? */
        struct hardwall_info *hardwall;
index acdae814e0161b967f6289e265f6aad1516b2aac..4a02bb07397993a2eb025ae7cd79caf618ae03d6 100644 (file)
@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t;
 
 /*
  * This struct defines the way the registers are stored on the stack during a
- * system call/exception.  It should be a multiple of 8 bytes to preserve
- * normal stack alignment rules.
- *
- * Must track <sys/ucontext.h> and <sys/procfs.h>
+ * system call or exception.  "struct sigcontext" has the same shape.
  */
 struct pt_regs {
        /* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@ struct pt_regs {
 
 #endif /* __ASSEMBLY__ */
 
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
-
 #define PTRACE_GETREGS         12
 #define PTRACE_SETREGS         13
 #define PTRACE_GETFPREGS       14
@@ -101,6 +93,11 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */
+
 #ifndef __ASSEMBLY__
 
 #define instruction_pointer(regs) ((regs)->pc)
index 7cd7672e3ad4043072a6b6ac9c393491426f08a1..5e2d03336f5335ae99151a383a855e5db7cae6b0 100644 (file)
 #ifndef _ASM_TILE_SIGCONTEXT_H
 #define _ASM_TILE_SIGCONTEXT_H
 
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
-
-/* Must track <sys/ucontext.h> */
+#include <arch/abi.h>
 
+/*
+ * struct sigcontext has the same shape as struct pt_regs,
+ * but is simplified since we know the fault is from userspace.
+ */
 struct sigcontext {
-       struct pt_regs regs;
+       uint_reg_t gregs[53];   /* General-purpose registers.  */
+       uint_reg_t tp;          /* Aliases gregs[TREG_TP].  */
+       uint_reg_t sp;          /* Aliases gregs[TREG_SP].  */
+       uint_reg_t lr;          /* Aliases gregs[TREG_LR].  */
+       uint_reg_t pc;          /* Program counter.  */
+       uint_reg_t ics;         /* In Interrupt Critical Section?  */
+       uint_reg_t faultnum;    /* Fault number.  */
+       uint_reg_t pad[5];
 };
 
 #endif /* _ASM_TILE_SIGCONTEXT_H */
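
Note: the new tile struct sigcontext above is declared to have the same shape as struct pt_regs, and the signal.c hunk later in this diff adds BUILD_BUG_ON checks for exactly that. Outside the kernel the same compile-time layout guarantee can be sketched with C11 static_assert; the demo structs below are stand-ins, not the real types.

    #include <assert.h>     /* C11 static_assert */
    #include <stdint.h>

    /* Stand-ins for pt_regs and sigcontext; only the size relationship
     * matters for this sketch.
     */
    struct regs_demo   { uint64_t gregs[56]; uint64_t pc; uint64_t pad[7]; };
    struct sigctx_demo { uint64_t gregs[56]; uint64_t pc; uint64_t pad[7]; };

    /* Equivalent of the BUILD_BUG_ON() pair in the signal.c hunk: the two
     * layouts must match and must keep 8-byte stack alignment.
     */
    static_assert(sizeof(struct sigctx_demo) == sizeof(struct regs_demo),
                  "sigcontext must mirror pt_regs so casts between them work");
    static_assert(sizeof(struct sigctx_demo) % 8 == 0,
                  "size must stay a multiple of 8 for stack alignment");

    int main(void) { return 0; }
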
index eb0253f32202c0b52b65012b7c4a8c839aae47dc..c1ee1d61d44ca8a07c504b9af425df3a9c7185c7 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm-generic/signal.h>
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+struct pt_regs;
 int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
 void do_signal(struct pt_regs *regs);
index af165a74537f84ff1f377ce22951848bba58d9e0..ce99ffefeacff351c5523d574581e1092d57ca06 100644 (file)
@@ -62,10 +62,12 @@ long sys_fork(void);
 long _sys_fork(struct pt_regs *regs);
 long sys_vfork(void);
 long _sys_vfork(struct pt_regs *regs);
-long sys_execve(char __user *filename, char __user * __user *argv,
-               char __user * __user *envp);
-long _sys_execve(char __user *filename, char __user * __user *argv,
-                char __user * __user *envp, struct pt_regs *regs);
+long sys_execve(const char __user *filename,
+               const char __user *const __user *argv,
+               const char __user *const __user *envp);
+long _sys_execve(const char __user *filename,
+                const char __user *const __user *argv,
+                const char __user *const __user *envp, struct pt_regs *regs);
 
 /* kernel/signal.c */
 long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
 #endif
 
 #ifdef CONFIG_COMPAT
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                      compat_uptr_t __user *envp);
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                       compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_execve(const char __user *path,
+                      const compat_uptr_t __user *argv,
+                      const compat_uptr_t __user *envp);
+long _compat_sys_execve(const char __user *path,
+                       const compat_uptr_t __user *argv,
+                       const compat_uptr_t __user *envp,
+                       struct pt_regs *regs);
 long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
                            struct compat_sigaltstack __user *uoss_ptr);
 long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
index 84f296ca9e63c85da528ee9a8d403cc8362c4097..8f58bdff20d7f7dd9b77d16a6901eb9db82f056b 100644 (file)
@@ -1506,13 +1506,6 @@ handle_ill:
        }
        STD_ENDPROC(handle_ill)
 
-       .pushsection .rodata, "a"
-       .align  8
-bpt_code:
-       bpt
-       ENDPROC(bpt_code)
-       .popsection
-
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
index 985cc28c74c5696f91adcf82d6347439999bfa41..84c29111756c2212f02cd5bc841691c1cc0a0fd9 100644 (file)
@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+       t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
+#endif
 }
 
 static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t)
 #if CHIP_HAS_PROC_STATUS_SPR()
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
 #endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+       __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
+#endif
 #if CHIP_HAS_TILE_RTF_HWM()
-       /*
-        * Clear this whenever we switch back to a process in case
-        * the previous process was monkeying with it.  Even if enabled
-        * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
-        * performance hint, so isn't worth a full save/restore.
-        */
-       __insn_mtspr(SPR_TILE_RTF_HWM, 0);
+       __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+       __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
 #endif
 }
 
@@ -561,8 +570,9 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-                       compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(const char __user *path,
+                       const compat_uptr_t __user *argv,
+                       const compat_uptr_t __user *envp, struct pt_regs *regs)
 {
        long error;
        char *filename;
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs)
               regs->regs[51], regs->regs[52], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
-       for (i = 0; i < 52; i += 3)
+       for (i = 0; i < 52; i += 4)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
index 45b66a3c991ffbc79a7f93e3f00543e50b20de8a..ce183aa1492c7abfaadfca95d54ec76bddc11455 100644 (file)
@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs,
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+       /*
+        * Enforce that sigcontext is like pt_regs, and doesn't mess
+        * up our stack alignment rules.
+        */
+       BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
+       BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
+
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-               err |= __get_user(((long *)regs)[i],
-                                 &((long __user *)(&sc->regs))[i]);
+               err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
        regs->faultnum = INT_SWINT_1_SIGRETURN;
 
-       err |= __get_user(*pr0, &sc->regs.regs[0]);
+       err |= __get_user(*pr0, &sc->gregs[0]);
        return err;
 }
 
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
        int i, err = 0;
 
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-               err |= __put_user(((long *)regs)[i],
-                                 &((long __user *)(&sc->regs))[i]);
+               err |= __put_user(regs->regs[i], &sc->gregs[i]);
 
        return err;
 }
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * Set up registers for signal handler.
         * Registers that we don't modify keep the value they had from
         * user-space at the time we took the signal.
+        * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+        * since some things rely on this (e.g. glibc's debug/segfault.c).
         */
        regs->pc = (unsigned long) ka->sa.sa_handler;
        regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
        regs->sp = (unsigned long) frame;
        regs->lr = restorer;
        regs->regs[0] = (unsigned long) usig;
-
-       if (ka->sa.sa_flags & SA_SIGINFO) {
-               /* Need extra arguments, so mark to restore caller-saves. */
-               regs->regs[1] = (unsigned long) &frame->info;
-               regs->regs[2] = (unsigned long) &frame->uc;
-               regs->flags |= PT_FLAGS_CALLER_SAVES;
-       }
+       regs->regs[1] = (unsigned long) &frame->info;
+       regs->regs[2] = (unsigned long) &frame->uc;
+       regs->flags |= PT_FLAGS_CALLER_SAVES;
 
        /*
         * Notify any tracer that was single-stepping it.
index 38a68b0b45813474a791f18aabaa35eb33b8a38a..ea2e0ce28380a2d3fa59391cfc02cb4ca0022f1e 100644 (file)
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
                        pr_err("  <received signal %d>\n",
                               frame->info.si_signo);
                }
-               return &frame->uc.uc_mcontext.regs;
+               return (struct pt_regs *)&frame->uc.uc_mcontext;
        }
        return NULL;
 }
index cd145eda357950b66b1ad2f05f55bd0094df6006..49b5e1eb32622abbdab4b3631207460bc80a1e14 100644 (file)
@@ -62,7 +62,7 @@ static long execve1(const char *file,
        return error;
 }
 
-long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
+long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
 {
        long err;
 
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
        return err;
 }
 
-long sys_execve(const char __user *file, char __user *__user *argv,
-               char __user *__user *env)
+long sys_execve(const char __user *file, const char __user *const __user *argv,
+               const char __user *const __user *env)
 {
        long error;
        char *filename;
index 1303a105fe91dc5aabca314e4e94faf8573b9c65..5bf97db24a046283f8704e2f7e0ee91e7b4d4844 100644 (file)
@@ -1 +1 @@
-extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
+extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
index 5ddb246626dbb87afe7484b87c0c495c8643de8f..f958cb876ee3d3e47ddff71094e8026c0a110f5d 100644 (file)
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
 
        fs = get_fs();
        set_fs(KERNEL_DS);
-       ret = um_execve(filename, (char __user *__user *)argv,
-                       (char __user *__user *) envp);
+       ret = um_execve(filename, (const char __user *const __user *)argv,
+                       (const char __user *const __user *) envp);
        set_fs(fs);
 
        return ret;
index 8aa1b59b9074586e1fe9930b85d0cf945f14a695..e8c8881351b3a5a0b17429851732e0b12405d7c6 100644 (file)
@@ -74,7 +74,7 @@ endif
 
 ifdef CONFIG_CC_STACKPROTECTOR
        cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
                 stackp-y := -fstack-protector
                 KBUILD_CFLAGS += $(stackp-y)
         else
index 030f4b93e255ac00c9cd646a733eda26733a5324..5df2869c874baced33de00a78a7b693f3237ea0f 100644 (file)
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
                if (arg[pos] == ',')
                        pos++;
 
-               if (!strncmp(arg, "ttyS", 4)) {
+               /*
+                * make sure we have
+                *      "serial,0x3f8,115200"
+                *      "serial,ttyS0,115200"
+                *      "ttyS0,115200"
+                */
+               if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
+                       port = simple_strtoull(arg + pos, &e, 16);
+                       if (port == 0 || arg + pos == e)
+                               port = DEFAULT_SERIAL_PORT;
+                       else
+                               pos = e - arg;
+               } else if (!strncmp(arg + pos, "ttyS", 4)) {
                        static const int bases[] = { 0x3f8, 0x2f8 };
                        int idx = 0;
 
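
Note: the parse_earlyprintk() hunk above accepts an I/O port spelled either as a hex number ("serial,0x3f8,115200") or as a ttyS name. A small user-space sketch of the hex branch; strtoul stands in for the kernel's simple_strtoull, and pos == 7 corresponds to skipping "serial,".

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DEFAULT_SERIAL_PORT 0x3f8   /* assumption for the sketch */

    int main(void)
    {
            const char *arg = "serial,0x3f8,115200";
            int pos = strlen("serial,");             /* 7, as in the hunk */
            char *e;
            unsigned long port;

            if (!strncmp(arg + pos, "0x", 2)) {
                    port = strtoul(arg + pos, &e, 16);
                    if (port == 0 || arg + pos == e)
                            port = DEFAULT_SERIAL_PORT;
            } else {
                    port = DEFAULT_SERIAL_PORT;
            }
            printf("port = 0x%lx\n", port);          /* 0x3f8 */
            return 0;
    }
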
index b86feabed69bfe8e74f81f179c91fbe3a4b799d8..518bb99c339480820fc3995b1456d29704d67f07 100644 (file)
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %eax because syscall_trace_enter() returned
-        * the value it wants us to use in the table lookup.
+        * the %rax value we should see.  Instead, we just truncate that
+        * value to 32 bits again as we did on entry from user mode.
+        * If it's a new value set by user_regset during entry tracing,
+        * this matches the normal truncation of the user-mode value.
+        * If it's -1 to make us punt the syscall, then (u32)-1 is still
+        * an appropriately invalid value.
         */
        .macro LOAD_ARGS32 offset, _r9=0
        .if \_r9
@@ -60,6 +65,7 @@
        movl \offset+48(%rsp),%edx
        movl \offset+56(%rsp),%esi
        movl \offset+64(%rsp),%edi
+       movl %eax,%eax                  /* zero extension */
        .endm
        
        .macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
        testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz  sysenter_tracesys
-       cmpl    $(IA32_NR_syscalls-1),%eax
+       cmpq    $(IA32_NR_syscalls-1),%rax
        ja      ia32_badsys
 sysenter_do_call:
        IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
        movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
        call audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
        movl %ebx,%edi                  /* reload 1st syscall arg */
        movl RCX-ARGOFFSET(%rsp),%esi   /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
        call    syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpl    $(IA32_NR_syscalls-1),%eax
+       cmpq    $(IA32_NR_syscalls-1),%rax
        ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
        jmp     sysenter_do_call
        CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        CFI_REMEMBER_STATE
        jnz   cstar_tracesys
-       cmpl $IA32_NR_syscalls-1,%eax
+       cmpq $IA32_NR_syscalls-1,%rax
        ja  ia32_badsys
 cstar_do_call:
        IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
        LOAD_ARGS32 ARGOFFSET, 1  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        xchgl %ebp,%r9d
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
        jmp cstar_do_call
 END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
        orl   $TS_COMPAT,TI_status(%r10)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
        jnz ia32_tracesys
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
 ia32_do_call:
        IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
        RESTORE_REST
-       cmpl $(IA32_NR_syscalls-1),%eax
+       cmpq $(IA32_NR_syscalls-1),%rax
        ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
        jmp ia32_do_call
 END(ia32_syscall)
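
Note: the ia32entry.S hunks above zero-extend %eax after tracing ("movl %eax,%eax") and switch the syscall-number bounds check from cmpl to cmpq, so stale or tracer-supplied high bits of %rax can no longer disagree with the value the dispatch later uses (an inference from these hunks and the LOAD_ARGS32 comment, not spelled out verbatim). A user-space C illustration of why the width of the compare matters:

    #include <stdio.h>
    #include <stdint.h>

    #define NR_SYSCALLS 341   /* illustrative table size, not the real value */

    int main(void)
    {
            /* %rax with stale/tracer-set high bits; the low half looks valid. */
            uint64_t rax = 0xffffffff00000010ull;

            /* Old path: a 32-bit compare passes even though the full
             * register value, if used as the table index, is enormous.
             */
            printf("old: check %s, full value %llu\n",
                   (uint32_t)rax <= NR_SYSCALLS - 1 ? "passes" : "fails",
                   (unsigned long long)rax);

            /* New path: zero-extend first, then a 64-bit compare checks the
             * same value that is used afterwards.
             */
            uint64_t zext = (uint32_t)rax;           /* movl %eax,%eax */
            printf("new: check %s, value used %llu\n",
                   zext <= NR_SYSCALLS - 1 ? "passes" : "fails",
                   (unsigned long long)zext);
            return 0;
    }
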
index d2544f1d705d3eb9cad310ac3486f94c2358b51d..cb030374b90aeb526ad7f985527def42643bc1a9 100644 (file)
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* !CONFIG_AMD_IOMMU_STATS */
 
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H  */
index 7014e88bc7798af33f681724ff90cf09eda52af5..08616180deaf5b1e94bfe9ad0079cec7e9c619a3 100644 (file)
@@ -368,6 +368,9 @@ struct amd_iommu {
        /* capabilities of that IOMMU read from ACPI */
        u32 cap;
 
+       /* flags read from acpi table */
+       u8 acpi_flags;
+
        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
 
        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;
+
+       /*
+        * This array is required to work around a potential BIOS bug.
+        * The BIOS may miss to restore parts of the PCI configuration
+        * The BIOS may fail to restore parts of the PCI configuration
+        * space when the system resumes from S3. As a result the
+        * IOMMU stops executing commands, which leads to system
+        * failure.
+        */
+       u32 cache_cfg[4];
 };
 
 /*
index 545776efeb164c72d523f2c78f2c501e1535344c..bafd80defa4328ed2f49e808daee7a624434345d 100644 (file)
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr % BITS_PER_LONG)) &
-               (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+               (addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
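
Note: the bitops hunk above only drops a cast so the volatile qualifier on addr is preserved; the word/mask arithmetic is unchanged. For reference, the same split shown as a stand-alone program:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Same word/mask computation as constant_test_bit() above. */
    static int test_bit_demo(unsigned int nr, const unsigned long *addr)
    {
            return ((1UL << (nr % BITS_PER_LONG)) &
                    addr[nr / BITS_PER_LONG]) != 0;
    }

    int main(void)
    {
            unsigned long map[2] = { 0 };

            map[1] |= 1UL << 3;                   /* set bit BITS_PER_LONG + 3 */
            printf("%d\n", test_bit_demo(BITS_PER_LONG + 3, map));  /* 1 */
            return 0;
    }
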
index 306160e58b48772ccef1f65c803a3a3813ff5d1e..1d9cd27c2920a326e5ac2440ebdbd998d0b925c0 100644 (file)
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
        struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->sp - len;
index 781a50b29a4917545e71c3e74bdcbbda7faa383d..3f76523589afa8b9f30abcb6841962b6344d603e 100644 (file)
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
@@ -296,6 +297,7 @@ extern const char * const x86_power_flags[32];
 
 #endif /* CONFIG_X86_64 */
 
+#if __GNUC__ >= 4
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
@@ -304,7 +306,7 @@ extern const char * const x86_power_flags[32];
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
@@ -345,7 +347,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 #endif
 }
 
-#if __GNUC__ >= 4
 #define static_cpu_has(bit)                                    \
 (                                                              \
        __builtin_constant_p(boot_cpu_has(bit)) ?               \
index 004e6e25e91301dd8a6d188d84a9c9ee795a7100..1d5c08a1bdfdb5b61c72cb286dfb0ac382c17e76 100644 (file)
@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
index 528a11e8d3e35f64fea90202d6f196d77d48e708..824ca07860d012cdcc9c46886500d056fdb2aeed 100644 (file)
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
 #include <linux/list.h>
 
 /* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X           0x00
+#define X86_BREAKPOINT_LEN_X           0x40
 #define X86_BREAKPOINT_LEN_1           0x40
 #define X86_BREAKPOINT_LEN_2           0x44
 #define X86_BREAKPOINT_LEN_4           0x4c
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 51cfd730ac5d145ed9f184f643c254d441dbeaab..1f99ecfc48e178312860f177160b735c2655946f 100644 (file)
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
 struct operand {
        enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
        unsigned int bytes;
-       unsigned long orig_val, *ptr;
+       union {
+               unsigned long orig_val;
+               u64 orig_val64;
+       };
+       unsigned long *ptr;
        union {
                unsigned long val;
+               u64 val64;
                char valptr[sizeof(unsigned long) + 2];
        };
 };
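
Note: struct operand above gains 64-bit aliases (orig_val64, val64) through anonymous unions, so the emulator can read the same storage as either unsigned long or u64. A minimal stand-alone illustration of that aliasing, built as C11 (or with GNU extensions); the demo struct is not the KVM type.

    #include <stdio.h>

    struct operand_demo {
            union {
                    unsigned long orig_val;
                    unsigned long long orig_val64;
            };
            union {
                    unsigned long val;
                    unsigned long long val64;
                    char valptr[sizeof(unsigned long) + 2];
            };
    };

    int main(void)
    {
            struct operand_demo op;

            op.val64 = 0x1122334455667788ull;
            /* On a little-endian build, val shows the low unsigned-long's
             * worth of the same bits written through val64.
             */
            printf("val64=%llx val=%lx\n", op.val64, op.val);
            return 0;
    }
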
index 0925676266bdbc9cbcf03811916b9f8b9b9e627e..fedf32a8c3ecdd7a1aaed41b2884d2c9b10bf033 100644 (file)
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_pvclock.o = -pg
+CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
index fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f..fb16f17e59bea7dcde91e6ad6275c7bad5bbdfb8 100644 (file)
@@ -61,7 +61,7 @@ struct cstate_entry {
                unsigned int ecx;
        } states[ACPI_PROCESSOR_MAX_POWER];
 };
-static struct cstate_entry *cpu_cstate_entry;  /* per CPU ptr */
+static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
 
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
index fa044e1e30a2ed081175480dccec352a7e381392..679b6450382b9c8bafa2d71e2677fcd5ae075b95 100644 (file)
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
                           size_t size,
                           int dir)
 {
+       dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;
 
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
            (dma_addr + size > dma_dom->aperture_size))
                return;
 
+       flush_addr = dma_addr;
        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-               iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+               iommu_flush_pages(&dma_dom->domain, flush_addr, size);
                dma_dom->need_flush = false;
        }
 }
index 3cc63e2b8dd4c4acc4ee7f77c3ad432d97beb169..5a170cbbbed86aa1376eb19336a5329aacc96e91 100644 (file)
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+       if (is_rd890_iommu(iommu->dev)) {
+               pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+               pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+               pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+               pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+       }
 }
 
 /*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        struct ivhd_entry *e;
 
        /*
-        * First set the recommended feature enable bits from ACPI
-        * into the IOMMU control registers
+        * First save the recommended feature enable bits from ACPI
         */
-       h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-       h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-       h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-       h->flags & IVHD_FLAG_ISOC_EN_MASK ?
-               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-       /*
-        * make IOMMU memory accesses cache coherent
-        */
-       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+       iommu->acpi_flags = h->flags;
 
        /*
         * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
        }
 }
 
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+       iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+               iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+               iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+       iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+               iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+               iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+       /*
+        * make IOMMU memory accesses cache coherent
+        */
+       iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+       if (is_rd890_iommu(iommu->dev)) {
+               pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+               pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+               pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+               pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+       }
+}
+
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
 
        for_each_iommu(iommu) {
                iommu_disable(iommu);
+               iommu_apply_quirks(iommu);
+               iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
index f1efebaf55105fa835ac7938c1654295fdd81562..5c5b8f3dddb58686ba4afc8b314237c0c318370b 100644 (file)
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
 
        old_cfg = old_desc->chip_data;
 
-       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+       cfg->vector = old_cfg->vector;
+       cfg->move_in_progress = old_cfg->move_in_progress;
+       cpumask_copy(cfg->domain, old_cfg->domain);
+       cpumask_copy(cfg->old_domain, old_cfg->old_domain);
 
        init_copy_irq_2_pin(old_cfg, cfg, node);
 }
 
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
 {
-       kfree(old_cfg);
+       free_cpumask_var(cfg->domain);
+       free_cpumask_var(cfg->old_domain);
+       kfree(cfg);
 }
 
 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
index 7b598b84c902e62f9f852d78c896acb03d4ba318..f744f54cb248e7ac0cd6defaa84a8e673468b30a 100644 (file)
@@ -698,9 +698,11 @@ void __init uv_system_init(void)
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
-                       uv_blade_info[blade].pnode = (i * 64 + j);
+                       pnode = (i * 64 + j);
+                       uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
+                       max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
-               max_pnode = max(pnode, max_pnode);
        }
 
        /* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
-               max_pnode = max(pnode, max_pnode);
        }
 
        map_gru_high(max_pnode);
index 490dac63c2d21e90ab3af70c543b012f1c3f43b5..f2f9ac7da25ccfba6d5ba7ea44b63899ba672e97 100644 (file)
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
 }
 
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
        u32 ebx;
index 3624e8a0f71bf72e4c3cd86abcedfbb1c53d9b4d..f668bb1f7d432811bc3f773d777ca2596b6f6e33 100644 (file)
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
 
 #endif
index 994230d4dc4e545986a8c982629589905f0f224d..4f6f679f27990198640f9a1e139ea3a838b05eb8 100644 (file)
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
                return -ENODEV;
 
        out_obj = output.pointer;
-       if (out_obj->type != ACPI_TYPE_BUFFER)
-               return -ENODEV;
+       if (out_obj->type != ACPI_TYPE_BUFFER) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-       if (errors)
-               return -ENODEV;
+       if (errors) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
        supported = *((u32 *)(out_obj->buffer.pointer + 4));
-       if (!(supported & 0x1))
-               return -ENODEV;
+       if (!(supported & 0x1)) {
+               ret = -ENODEV;
+               goto out_free;
+       }
 
 out_free:
        kfree(output.pointer);
index 85f69cdeae1020a18e1c9097c8da276a8e50b992..b4389441efbbd8e791289aa936369e5e73917c18 100644 (file)
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
+                       get_cpu_cap(c);
                }
        }
 
index 224392d8fe8c095390439a10ea45af2e21bc0930..5e975298fa819ff3ca648e647f531700edb1f408 100644 (file)
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
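
For readers less familiar with the cpumask helpers used above: zalloc_cpumask_var() is alloc_cpumask_var() plus zeroing of the mask, which is what the cpumask_set_cpu() call now relies on in order to start from an empty mask. A rough equivalence sketch, for illustration only (the example_* name is made up and not part of this commit):

#include <linux/cpumask.h>

/* Illustration: zalloc_cpumask_var() behaves like allocate-then-clear,
 * so only the CPU explicitly set afterwards ends up in the mask. */
static bool example_alloc_zeroed_cpumask(cpumask_var_t *mask, gfp_t flags)
{
        if (!alloc_cpumask_var(mask, flags))
                return false;
        cpumask_clear(*mask);
        return true;
}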
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..d9368eeda3090eb9f53482704e000e600b6237e3 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..03a5b0385ad6c4402d2192b42aa14e9bfaccc80e 100644 (file)
@@ -102,6 +102,7 @@ struct cpu_hw_events {
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;
 
        int                     n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
        x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
+       __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
 
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask))
+               if (!test_bit(idx, cpuc->active_mask)) {
+                       /*
+                        * Though we deactivated the counter, some cpus
+                        * might still deliver spurious interrupts that
+                        * are already in flight. Catch them:
+                        */
+                       if (__test_and_clear_bit(idx, cpuc->running))
+                               handled++;
                        continue;
+               }
 
                event = cpuc->events[idx];
                hwc = &event->hw;
@@ -1154,7 +1164,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
@@ -1200,12 +1210,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1232,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back nmi. Two events
+                * trigger 'simultaneously' raising two back-to-back
+                * NMIs. If the first NMI handles both, the latter
+                * will be empty and daze the CPU. So, we drop it to
+                * avoid false-positive 'unknown nmi' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back nmis: The
+                * first handles more than one counter, the 2nd
+                * handles only one counter and the 3rd handles no
+                * counter.
+                *
+                * This is the 2nd nmi because the previous was
+                * handling more than one counter. We will mark the
+                * next (3rd) and then drop it if unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
index d8d86d01400866c6320001fb715301276747cd52..ee05c90012d269e66de9ffb6a5952d1434317c1e 100644 (file)
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled = 0;
 
        perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
        loops = 0;
 again:
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -761,8 +766,6 @@ again:
                        x86_pmu_stop(event);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
index 7e578e9cc58bd5062d30776d431dabdcf724ac67..2490151739921e074085a5d4a39a6cfb783f7187 100644 (file)
@@ -660,8 +660,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                int overflow;
 
-               if (!test_bit(idx, cpuc->active_mask))
+               if (!test_bit(idx, cpuc->active_mask)) {
+                       /* catch in-flight IRQs */
+                       if (__test_and_clear_bit(idx, cpuc->running))
+                               handled++;
                        continue;
+               }
 
                event = cpuc->events[idx];
                hwc = &event->hw;
@@ -692,7 +696,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index 34b4dad6f0b8e35a0fe80d01f8708c230592144d..d49079515122a6f47d2731cb1d2ac2f03efaf6e0 100644 (file)
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
index e5cc7e82e60ddbf1bd1ca2871fdb7d7fc7628e34..ebdb85cf2686fa36702cd4d50b657f22de85b3bd 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/apic.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
-#include <asm/hpet.h>
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
 }
 #endif
 
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
-       hpet_readback_cmp = 1;
-#endif
-}
-
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
 #define QFLAG_DONE             (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = {
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
        { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
          PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
-       { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
-         PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
        {}
 };
 
index 351f9c0fea1f20714321d01260003173faf548dc..7494999141b3ae9c9ab67302481a2e6c15e969e6 100644 (file)
@@ -35,7 +35,6 @@
 unsigned long                          hpet_address;
 u8                                     hpet_blockid; /* OS timer block num */
 u8                                     hpet_msi_disable;
-u8                                     hpet_readback_cmp;
 
 #ifdef CONFIG_PCI_MSI
 static unsigned long                   hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
         * at that point and we would wait for the next hpet interrupt
         * forever. We found out that reading the CMP register back
         * forces the transfer so we can rely on the comparison with
-        * the counter register below.
+        * the counter register below. If the read back from the
+        * compare register does not match the value we programmed
+        * then we might have a real hardware problem. We can not do
+        * much about it here, but at least alert the user/admin with
+        * a prominent warning.
         *
-        * That works fine on those ATI chipsets, but on newer Intel
-        * chipsets (ICH9...) this triggers due to an erratum: Reading
-        * the comparator immediately following a write is returning
-        * the old value.
+        * An erratum on some chipsets (ICH9, ...) results in the
+        * comparator read immediately following a write returning the
+        * old value. The workaround is to read the comparator a second
+        * time when the first read returns the old value.
         *
-        * We restrict the read back to the affected ATI chipsets (set
-        * by quirks) and also run it with hpet=verbose for debugging
-        * purposes.
+        * In fact the write to the comparator register is delayed by up
+        * to two HPET cycles, so our earlier workaround of restricting
+        * the readback to the known to be borked ATI chipsets
+        * failed miserably. So we give up on that optimization
+        * and penalize all HPET incarnations unconditionally.
         */
-       if (hpet_readback_cmp || hpet_verbose) {
-               u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
-               if (cmp != cnt)
+       if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+               if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
                        printk_once(KERN_WARNING
-                           "hpet: compare register read back failed.\n");
+                               "hpet: compare register read back failed.\n");
        }
 
        return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
@@ -503,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
 {
        unsigned int irq;
 
-       irq = create_irq();
+       irq = create_irq_nr(0, -1);
        if (!irq)
                return -EINVAL;
 
index a474ec37c32f84df372d39eac5730532d60d0228..ff15c9dcc25de8be8144fd4d15f1dfd3069314e4 100644 (file)
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
 int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
 {
-       /* Len */
-       switch (x86_len) {
-       case X86_BREAKPOINT_LEN_X:
+       /* Type */
+       switch (x86_type) {
+       case X86_BREAKPOINT_EXECUTE:
+               if (x86_len != X86_BREAKPOINT_LEN_X)
+                       return -EINVAL;
+
+               *gen_type = HW_BREAKPOINT_X;
                *gen_len = sizeof(long);
+               return 0;
+       case X86_BREAKPOINT_WRITE:
+               *gen_type = HW_BREAKPOINT_W;
                break;
+       case X86_BREAKPOINT_RW:
+               *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Len */
+       switch (x86_len) {
        case X86_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
                return -EINVAL;
        }
 
-       /* Type */
-       switch (x86_type) {
-       case X86_BREAKPOINT_EXECUTE:
-               *gen_type = HW_BREAKPOINT_X;
-               break;
-       case X86_BREAKPOINT_WRITE:
-               *gen_type = HW_BREAKPOINT_W;
-               break;
-       case X86_BREAKPOINT_RW:
-               *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
-               break;
-       default:
-               return -EINVAL;
-       }
-
        return 0;
 }
 
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        ret = -EINVAL;
 
        switch (info->len) {
-       case X86_BREAKPOINT_LEN_X:
-               align = sizeof(long) -1;
-               break;
        case X86_BREAKPOINT_LEN_1:
                align = 0;
                break;
index e0bc186d7501f123265ae288ae071e772016e89b..1c355c550960ab75279c644b52269cd5a368239a 100644 (file)
@@ -239,11 +239,10 @@ int module_finalize(const Elf_Ehdr *hdr,
                apply_paravirt(pseg, pseg + para->sh_size);
        }
 
-       return module_bug_finalize(hdr, sechdrs, me);
+       return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
        alternatives_smp_module_del(mod);
-       module_bug_cleanup(mod);
 }
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index d632934cb6386947352650f262745eb3c93c68ce..26a863a9c2a815ec797b6211cd3de5e0b9cbed8e 100644 (file)
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
 
        local_irq_save(flags);
 
-       get_cpu_var(cyc2ns_offset) = 0;
+       __get_cpu_var(cyc2ns_offset) = 0;
        offset = cyc2ns_suspend - sched_clock();
 
        for_each_possible_cpu(cpu)
index b38bd8b92aa6c84ed9a00295eeaac73bade26458..66ca98aafdd6a73d7eea8834d1f1c930384b8268 100644 (file)
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       u64 old = c->dst.orig_val;
+       u64 old = c->dst.orig_val64;
 
        if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
            ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
                c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
                c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
                ctxt->eflags &= ~EFLG_ZF;
        } else {
-               c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
-                      (u32) c->regs[VCPU_REGS_RBX];
+               c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+                       (u32) c->regs[VCPU_REGS_RBX];
 
                ctxt->eflags |= EFLG_ZF;
        }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                                        c->src.valptr, c->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
-               c->src.orig_val = c->src.val;
+               c->src.orig_val64 = c->src.val64;
        }
 
        if (c->src2.type == OP_MEM) {
index 8d10c063d7f207451b087a11a8d7bf0c888f3695..4b7b73ce209894442eddd9df2b8f076408389b12 100644 (file)
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
                if (!found)
                        found = s->kvm->bsp_vcpu;
 
+               if (!found)
+                       return;
+
                kvm_vcpu_kick(found);
        }
 }
index ffed06871c5cf389594244086ecb3685496b4258..63c314502993b33c1a53773d111eb69133668179 100644 (file)
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
        u8 irr;         /* interrupt request register */
        u8 imr;         /* interrupt mask register */
        u8 isr;         /* interrupt service register */
-       u8 isr_ack;     /* interrupt ack detection */
        u8 priority_add;        /* highest irq priority */
        u8 irq_base;
        u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
        u8 init4;               /* true if 4 byte init */
        u8 elcr;                /* PIIX edge/trigger selection */
        u8 elcr_mask;
+       u8 isr_ack;     /* interrupt ack detection */
        struct kvm_pic *pics_state;
 };
 
index 9257510b4836837eb4cdff719000b716ff3bdd63..9d5f5584845587acbf5f544d7405e260a5e16b95 100644 (file)
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
 }
 
 /*
- * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
- * then tell the Host to reload the entire thing.  This operation is so rare
- * that this naive implementation is reasonable.
+ * For a single GDT entry which changes, we simply change our copy and
+ * then tell the host about it.
  */
 static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
                                   const void *desc, int type)
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
 }
 
 /*
- * OK, I lied.  There are three "thread local storage" GDT entries which change
+ * There are three "thread local storage" GDT entries which change
  * on every context switch (these three entries are how glibc implements
- * __thread variables).  So we have a hypercall specifically for this case.
+ * __thread variables).  As an optimization, we have a hypercall
+ * specifically for this case.
+ *
+ * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
+ * which took a range of entries?
  */
 static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
 {
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
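
With the annotation change above, the pointer returned by iomap_atomic_prot_pfn() is typed __iomem, so sparse-clean callers use the I/O accessors on it. A hypothetical caller, shown only as a sketch (the function name, register offset and km slot are illustrative and not taken from this commit):

#include <linux/highmem.h>
#include <linux/io.h>
#include <asm/iomap.h>

/* Sketch of a caller adapted to the __iomem return type. */
static void example_poke_mmio(unsigned long pfn, u32 val)
{
        void __iomem *base;

        base = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);
        iowrite32(val, base + 0x10);    /* hypothetical register offset */
        iounmap_atomic(base, KM_USER0);
}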
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..f1575c9a2572074cad464ae2f9c51ee88e63bce3 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -664,7 +671,10 @@ static int __init ppro_init(char **cpu_type)
        case 14:
                *cpu_type = "i386/core";
                break;
-       case 15: case 23:
+       case 0x0f:
+       case 0x16:
+       case 0x17:
+       case 0x1d:
                *cpu_type = "i386/core_2";
                break;
        case 0x1a:
@@ -695,6 +705,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
+       using_nmi = 0;
+
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -774,7 +786,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
index 1a5353a753fcd10e1330c03d1af43a6fe9c5a5b7..b2bb5aa3b0540e42847a7664aa529cf5d2c0fb83 100644 (file)
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void)
 __init void xen_hvm_init_time_ops(void)
 {
        /* vector callback is needed otherwise we cannot receive interrupts
-        * on cpu > 0 */
-       if (!xen_have_vector_callback && num_present_cpus() > 1)
+        * on cpu > 0 and at this point we don't know how many cpus are
+        * available */
+       if (!xen_have_vector_callback)
                return;
        if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
                printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
index a6809645d212d9cf970b85473e3d9c05bc4ba652..2fef1ef931a06756d364707c28d97e6c440aa484 100644 (file)
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
        /* Currently we do not support hierarchy deeper than two level (0,1) */
        if (parent != cgroup->top_cgroup)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-EPERM);
 
        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
index ee1a1e7e63ccfc3e735566e3725febee1890bb2e..32a1c123dfb36a5fff24857221a9a4d1b79131bf 100644 (file)
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = (bio->bi_rw & REQ_SYNC);
-       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+       const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
        if ((bio->bi_rw & REQ_HARDBARRIER) &&
index c65d7593f7f1deba5511347ceeb6dd5c881e5b3c..ade0a08c9099afdc487916924c0fe6718185536d 100644 (file)
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                return PTR_ERR(bio);
 
        if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << REQ_WRITE);
+               bio->bi_rw |= REQ_WRITE;
 
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
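
For context on the change above: after the request/bio flag rework, REQ_WRITE is itself a bit mask (1 << __REQ_WRITE), so shifting it again would set an unrelated bit. A minimal sketch of the distinction, using made-up EXAMPLE_* names rather than the real flag definitions:

/* Sketch only: once the flag is defined as a mask, OR it in directly. */
enum { __EXAMPLE_REQ_WRITE = 0 };
#define EXAMPLE_REQ_WRITE       (1UL << __EXAMPLE_REQ_WRITE)

static inline void example_mark_write(unsigned long *bi_rw)
{
        *bi_rw |= EXAMPLE_REQ_WRITE;            /* correct */
        /* *bi_rw |= (1 << EXAMPLE_REQ_WRITE);  sets bit 1, not bit 0 */
}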
index 3b0cd4249671d9826b42ea10473a11486c403368..eafc94f68d79f2ceb1c0a89fce8e449c3a5ff3f6 100644 (file)
@@ -361,6 +361,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
+       /*
+        * Don't merge file system requests and discard requests
+        */
+       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+               return 0;
+
+       /*
+        * Don't merge discard requests and secure discard requests
+        */
+       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+               return 0;
+
        /*
         * not contiguous
         */
index 001ab18078f5ba1b8c6f34e021cc02ef401d98b6..0749b89c68852fc824e4afa862d871814c56ddd8 100644 (file)
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
+               kobject_put(&dev->kobj);
                return ret;
        }
 
index 6e7dc87141e48230d0eb82c2bdcf770b0ac581a9..d6b911ac002cd9ef14c76535a6b3b6814243079b 100644 (file)
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+       int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
-       return cpumask_first(mask);
+       group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       return cpumask_first(topology_thread_cpumask(cpu));
+       group = cpumask_first(topology_thread_cpumask(cpu));
 #else
        return cpu;
 #endif
+       if (likely(group < NR_CPUS))
+               return group;
+       return cpu;
 }
 
 /*
index eb4086f7dfef9eb7efc6d202fb7a979919a551b8..9eba291eb6fd23854aee14d8a35b5b1108fc0629 100644 (file)
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        struct cfq_group *orig_cfqg;
+       /* Number of sectors dispatched from the queue in a single dispatch round */
+       unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
        struct hlist_node cfqd_node;
        atomic_t ref;
 #endif
+       /* number of requests that are on the dispatch list or inside the driver */
+       int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
                        &cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+       /*
+        * If we are not idling on queues and the drive supports NCQ,
+        * requests execute in parallel and measuring time per queue is
+        * not meaningful unless we force shallower queue depths, which
+        * becomes a performance bottleneck. In such cases switch to
+        * providing fairness in terms of number of IOs instead.
+        */
+       if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+               return true;
+       else
+               return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
        if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                        slice_used = cfqq->allocated_slice;
        }
 
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
        return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl, charge_sl;
+       unsigned int used_sl, charge;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
 
        BUG_ON(nr_sync < 0);
-       used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+       used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-       if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-               charge_sl = cfqq->allocated_slice;
+       if (iops_mode(cfqd))
+               charge = cfqq->slice_dispatch;
+       else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+               charge = cfqq->allocated_slice;
 
        /* Can't update vdisktime while group is on service tree */
        cfq_rb_erase(&cfqg->rb_node, st);
-       cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+       cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+                       iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
         */
        atomic_set(&cfqg->ref, 1);
 
-       /* Add group onto cgroup list */
-       sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-       cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+       /*
+        * Add group onto cgroup list. It might happen that bdi->dev is
+        * not initialized yet. In that case initialize the new group
+        * without major and minor info; the info will be filled in once
+        * a new thread comes in for IO. See code above.
+        */
+       if (bdi->dev) {
+               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
                                        MKDEV(major, minor));
+       } else
+               cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+                                       0);
+
        cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
        /* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
+               cfqq->nr_sectors = 0;
 
                cfq_clear_cfqq_wait_request(cfqq);
                cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        BUG_ON(!service_tree);
        BUG_ON(!service_tree->count);
 
+       if (!cfqd->cfq_slice_idle)
+               return false;
+
        /* We never do for idle class queues. */
        if (prio == IDLE_WORKLOAD)
                return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
-       unsigned long sl;
+       unsigned long sl, group_idle = 0;
 
        /*
         * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        /*
         * idle is disabled, either manually or by past process history
         */
-       if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-               return;
+       if (!cfq_should_idle(cfqd, cfqq)) {
+               /* no queue idling. Check for group idling */
+               if (cfqd->cfq_group_idle)
+                       group_idle = cfqd->cfq_group_idle;
+               else
+                       return;
+       }
 
        /*
         * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                return;
        }
 
+       /* There are other queues in the group, don't do group idle */
+       if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
-       sl = cfqd->cfq_slice_idle;
+       if (group_idle)
+               sl = cfqd->cfq_group_idle;
+       else
+               sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+                       group_idle ? 1 : 0);
 }
 
 /*
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
        cfq_remove_request(rq);
        cfqq->dispatched++;
+       (RQ_CFQG(rq))->dispatched++;
        elv_dispatch_sort(q, rq);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+       cfqq->nr_sectors += blk_rq_sectors(rq);
        cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                        rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                        cfqq = NULL;
                        goto keep_queue;
                } else
-                       goto expire;
+                       goto check_group_idle;
        }
 
        /*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
-       if (timer_pending(&cfqd->idle_slice_timer) ||
-           (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+       if (timer_pending(&cfqd->idle_slice_timer)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       /*
+        * If group idle is enabled and there are requests dispatched from
+        * this group, wait for requests to complete.
+        */
+check_group_idle:
+       if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+           && cfqq->cfqg->dispatched) {
                cfqq = NULL;
                goto keep_queue;
        }
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
+       (RQ_CFQG(rq))->dispatched--;
        cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
                        rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                 * the queue.
                 */
                if (cfq_should_wait_busy(cfqd, cfqq)) {
-                       cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                       unsigned long extend_sl = cfqd->cfq_slice_idle;
+                       if (!cfqd->cfq_slice_idle)
+                               extend_sl = cfqd->cfq_group_idle;
+                       cfqq->slice_end = jiffies + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_group_idle = cfq_group_idle;
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(group_idle),
        CFQ_ATTR(low_latency),
        CFQ_ATTR(group_isolation),
        __ATTR_NULL
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (!cfq_group_idle)
+               cfq_group_idle = 1;
+#else
+       cfq_group_idle = 0;
+#endif
        if (cfq_slab_setup())
                return -ENOMEM;
 
index ec585c9554d33c04b973537f9ac3216e332afa50..205b09a5bd9ead9e64d7bcfb60cec924f1a22cbd 100644 (file)
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
        struct elevator_queue *old_elevator, *e;
        void *data;
+       int err;
 
        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
-               return 0;
+               return -ENOMEM;
 
        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
-               return 0;
+               return -ENOMEM;
        }
 
        /*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        __elv_unregister_queue(old_elevator);
 
-       if (elv_register_queue(q))
+       err = elv_register_queue(q);
+       if (err)
                goto fail_register;
 
        /*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-       return 1;
+       return 0;
 
 fail_register:
        /*
@@ -1071,17 +1073,19 @@ fail_register:
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);
 
-       return 0;
+       return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-                         size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;
 
        if (!q->elevator)
-               return count;
+               return -ENXIO;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
-               return count;
+               return 0;
        }
 
-       if (!elevator_switch(q, e))
-               printk(KERN_ERR "elevator: switch to %s failed\n",
-                                                       elevator_name);
-       return count;
+       return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+                         size_t count)
+{
+       int ret;
+
+       if (!q->elevator)
+               return count;
+
+       ret = elevator_change(q, name);
+       if (!ret)
+               return count;
+
+       printk(KERN_ERR "elevator: switch to %s failed\n", name);
+       return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
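
The elevator_change() helper exported above gives in-kernel callers a direct way to switch a queue's I/O scheduler and receive a real error code. A hypothetical use from a driver's setup path, purely as a sketch (the function name and the choice of "noop" are illustrative):

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical: a memory-backed device that prefers the noop scheduler. */
static int example_prefer_noop(struct request_queue *q)
{
        int err = elevator_change(q, "noop");

        if (err)
                pr_warn("example: switch to noop failed: %d\n", err);
        return err;
}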
index ae473445ad6daf11254ee269c3194c9ed03e0cb1..a2aea53a75ed9fdf99058f9676429c84104c2f27 100644 (file)
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI)             += spi/
 obj-y                          += net/
 obj-$(CONFIG_ATM)              += atm/
 obj-$(CONFIG_FUSION)           += message/
-obj-$(CONFIG_FIREWIRE)         += firewire/
+obj-y                          += firewire/
 obj-y                          += ieee1394/
 obj-$(CONFIG_UIO)              += uio/
 obj-y                          += cdrom/
index b811f2173f6f167c84700040fe2720bafb43156f..88681aca88c581891399e18b056d9a6df94a9e01 100644 (file)
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
 
          Be aware that using this interface can confuse your Embedded
          Controller in a way that a normal reboot is not enough. You then
-         have to power of your system, and remove the laptop battery for
+         have to power off your system, and remove the laptop battery for
          some seconds.
          An Embedded Controller typically is available on laptops and reads
          sensor values like battery state and temperature.
index b76848c80be34729c56bd20d04f75f9f534cbb36..6b115f6c43133c211acd79c68a83785506a4357a 100644 (file)
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
        device_remove_file(&device->dev, &dev_attr_rrtime);
 }
 
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query firmware for the number of CPUs that should be idle.
+ * Returns -1 on failure.
+ */
+static int acpi_pad_pur(acpi_handle handle)
 {
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
-       int rev, num, ret = -EINVAL;
+       int num = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
-               return -EINVAL;
+               return num;
 
        if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
+               return num;
 
        package = buffer.pointer;
-       if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
-               goto out;
-       rev = package->package.elements[0].integer.value;
-       num = package->package.elements[1].integer.value;
-       if (rev != 1 || num < 0)
-               goto out;
-       *num_cpus = num;
-       ret = 0;
-out:
+
+       if (package->type == ACPI_TYPE_PACKAGE &&
+               package->package.count == 2 &&
+               package->package.elements[0].integer.value == 1) /* rev 1 */
+
+               num = package->package.elements[1].integer.value;
+
        kfree(buffer.pointer);
-       return ret;
+       return num;
 }
 
 /* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        uint32_t idle_cpus;
 
        mutex_lock(&isolated_cpus_lock);
-       if (acpi_pad_pur(handle, &num_cpus)) {
+       num_cpus = acpi_pad_pur(handle);
+       if (num_cpus < 0) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
index df85b53a674fc33105fa1434b3800db7ab26a423..7dad9160f20998112cc302d0a239c9fa27fcd429 100644 (file)
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
        ACPI_BITMASK_POWER_BUTTON_STATUS   | \
        ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
        ACPI_BITMASK_RT_CLOCK_STATUS       | \
+       ACPI_BITMASK_PCIEXP_WAKE_DISABLE   | \
        ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE               0x0001
index 74c24d517f81768a6a00418db71c9f93be335b2e..4093522eed45692012b5da617c57187ef20c4458 100644 (file)
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in  conjuction with
+ *              fatal system error. Used in  conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
index 22cfcfbd9fff77cd4cd5bb77be80c244461b2df5..491191e6cf692bffe1811a22674a0eb0720c3771 100644 (file)
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
 
                        /*
                         * 16-, 32-, and 64-bit cases must use the move macros that perform
-                        * endian conversion and/or accomodate hardware that cannot perform
+                        * endian conversion and/or accommodate hardware that cannot perform
                         * misaligned memory transfers
                         */
                case ACPI_RSC_MOVE16:
index 907e350f1c7df58370cb0e68a399b91d903ec466..fca34ccfd294a782e3f05312d99d5b5fdef5bfb4 100644 (file)
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
        depends on ACPI_APEI
        help
          ERST is a way provided by APEI to save and retrieve hardware
-         error infomation to and from a persistent store. Enable this
+         error information to and from a persistent store. Enable this
          if you want to debug and test the ERST kernel support
          and firmware implementation.
index 73fd0c7487c1ae0b1311f4d7e1c048ddc809153a..4a904a4bf05f83a1b952ec2365811f8c5db931e1 100644 (file)
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
 int apei_resources_request(struct apei_resources *resources,
                           const char *desc)
 {
-       struct apei_res *res, *res_bak;
+       struct apei_res *res, *res_bak = NULL;
        struct resource *r;
+       int rc;
 
-       apei_resources_sub(resources, &apei_resources_all);
+       rc = apei_resources_sub(resources, &apei_resources_all);
+       if (rc)
+               return rc;
 
+       rc = -EINVAL;
        list_for_each_entry(res, &resources->iomem, list) {
                r = request_mem_region(res->start, res->end - res->start,
                                       desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
                }
        }
 
-       apei_resources_merge(&apei_resources_all, resources);
+       rc = apei_resources_merge(&apei_resources_all, resources);
+       if (rc) {
+               pr_err(APEI_PFX "Failed to merge resources!\n");
+               goto err_unmap_ioport;
+       }
 
        return 0;
 err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
                        break;
                release_mem_region(res->start, res->end - res->start);
        }
-       return -EINVAL;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
 
 void apei_resources_release(struct apei_resources *resources)
 {
+       int rc;
        struct apei_res *res;
 
        list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
        list_for_each_entry(res, &resources->ioport, list)
                release_region(res->start, res->end - res->start);
 
-       apei_resources_sub(&apei_resources_all, resources);
+       rc = apei_resources_sub(&apei_resources_all, resources);
+       if (rc)
+               pr_err(APEI_PFX "Failed to sub resources!\n");
 }
 EXPORT_SYMBOL_GPL(apei_resources_release);
 
index 465c885938ee89a45db10f1f39d1ceb8cba41cf9..cf29df69380b8dd32585a074c53efc7af7a65471 100644 (file)
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
 
 static int einj_check_table(struct acpi_table_einj *einj_tab)
 {
-       if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+       if ((einj_tab->header_length !=
+            (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+           && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
                return -EINVAL;
        if (einj_tab->header.length < sizeof(struct acpi_table_einj))
                return -EINVAL;
index 5281ddda2777c99c40f0830c1d4e5022cc5b37aa..da1228a9a544f026bab6c549e65892d439367266 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table debug support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provides the
  * debugging/testing support for ERST kernel support and firmware
  * implementation.
  *
@@ -111,11 +111,13 @@ retry:
                goto out;
        }
        if (len > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(len, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(len, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = len;
                goto retry;
        }
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
        if (mutex_lock_interruptible(&erst_dbg_mutex))
                return -EINTR;
        if (usize > erst_dbg_buf_len) {
-               kfree(erst_dbg_buf);
+               void *p;
                rc = -ENOMEM;
-               erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
-               if (!erst_dbg_buf)
+               p = kmalloc(usize, GFP_KERNEL);
+               if (!p)
                        goto out;
+               kfree(erst_dbg_buf);
+               erst_dbg_buf = p;
                erst_dbg_buf_len = usize;
        }
        rc = copy_from_user(erst_dbg_buf, ubuf, usize);
index 18645f4e83cdd2f22d526276b320dac631be8940..1211c03149e8c7c258fee89dd1109e1e6b901c26 100644 (file)
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
  *
  * For more information about ERST, please refer to ACPI Specification
  * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
 {
        int rc;
        u64 offset;
+       void *src, *dst;
+
+       /* ioremap does not work in interrupt context */
+       if (in_interrupt()) {
+               pr_warning(ERST_PFX
+                          "MOVE_DATA can not be used in interrupt context");
+               return -EBUSY;
+       }
 
        rc = __apei_exec_read_register(entry, &offset);
        if (rc)
                return rc;
-       memmove((void *)ctx->dst_base + offset,
-               (void *)ctx->src_base + offset,
-               ctx->var2);
+
+       src = ioremap(ctx->src_base + offset, ctx->var2);
+       if (!src)
+               return -ENOMEM;
+       dst = ioremap(ctx->dst_base + offset, ctx->var2);
+       if (!dst) {
+               iounmap(src);
+               return -ENOMEM;
+       }
+
+       memmove(dst, src, ctx->var2);
+
+       iounmap(src);
+       iounmap(dst);
 
        return 0;
 }
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
 
 static int erst_check_table(struct acpi_table_erst *erst_tab)
 {
-       if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+       if ((erst_tab->header_length !=
+            (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+           && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
                return -EINVAL;
        if (erst_tab->header.length < sizeof(struct acpi_table_erst))
                return -EINVAL;
index 385a6059714a72dd32256685a2606586b92d771e..0d505e59214df73dfb40b67b7d5fc45a2d48e127 100644 (file)
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
        struct ghes *ghes = NULL;
        int rc = -EINVAL;
 
-       generic = ghes_dev->dev.platform_data;
+       generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
        if (!generic->enabled)
                return -ENODEV;
 
index 343168d1826626c202171c3a53c5b5e4c2090a65..1a3508a7fe03f157c2e0144727bbeb5c244d2a46 100644 (file)
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
 
 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
-       struct acpi_hest_generic *generic;
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
        int rc;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
-       generic = (struct acpi_hest_generic *)hest_hdr;
-       if (!generic->enabled)
+
+       if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
-       ghes_dev->dev.platform_data = generic;
+
+       rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+       if (rc)
+               goto err;
+
        rc = platform_device_add(ghes_dev);
        if (rc)
                goto err;
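
For context on the two GHES hunks above: instead of pointing dev.platform_data straight at the firmware-owned HEST entry, the probe data is now a pointer-sized blob copied in via platform_device_add_data(), and ghes_probe() dereferences that copy to recover the entry. A minimal sketch of the same pattern, with made-up names that are not part of this commit:

	#include <linux/errno.h>
	#include <linux/platform_device.h>

	struct my_hw_desc {
		int id;
		bool enabled;
	};

	/* Registration side: hand the core a *copy* of the pointer value.
	 * platform_device_add_data() duplicates the buffer it is given, so
	 * only sizeof(void *) bytes are copied, not the descriptor itself. */
	static int my_hw_register(struct my_hw_desc *desc)
	{
		struct platform_device *pdev;
		int rc;

		pdev = platform_device_alloc("my-hw", desc->id);
		if (!pdev)
			return -ENOMEM;

		rc = platform_device_add_data(pdev, &desc, sizeof(void *));
		if (rc)
			goto err_put;

		rc = platform_device_add(pdev);
		if (rc)
			goto err_put;
		return 0;

	err_put:
		platform_device_put(pdev);
		return rc;
	}

	/* Probe side: platform_data holds a pointer to the pointer, so one
	 * dereference recovers the original descriptor. */
	static int my_hw_probe(struct platform_device *pdev)
	{
		struct my_hw_desc *desc =
			*(struct my_hw_desc **)pdev->dev.platform_data;

		return desc && desc->enabled ? 0 : -ENODEV;
	}

Copying only the pointer keeps ownership of the descriptor with the firmware parser, while the platform core frees its small per-device copy when the device is released.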
index 8f8bd736d4ff11919656e79d2ef9442b037fafff..542e5390389120de7ecd7da4cb7bd059baa18b01 100644 (file)
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
        list_add_tail_rcu(&map->list, &acpi_iomaps);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
 
-       return vaddr + (paddr - pg_off);
+       return map->vaddr + (paddr - map->paddr);
 err_unmap:
        iounmap(vaddr);
        return NULL;
index dc58402b0a177a4e03e8dc54e7a094804edc1d36..98417201e9ce3881257354e7c2a360ee019a4e39 100644 (file)
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_CYCLE_COUNT,
        POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
        POWER_SUPPLY_PROP_VOLTAGE_NOW,
-       POWER_SUPPLY_PROP_CURRENT_NOW,
        POWER_SUPPLY_PROP_POWER_NOW,
        POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
        POWER_SUPPLY_PROP_ENERGY_FULL,
index 2bb28b9d91c4c2643106ed0d63069ae85799d136..f7619600270a6c6279986fed3ec6b115a59893f3 100644 (file)
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
 {
        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
        acpi_osi_setup("!Windows 2006");
+       acpi_osi_setup("!Windows 2006 SP1");
+       acpi_osi_setup("!Windows 2006 SP2");
        return 0;
 }
 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                },
        },
        {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba Satellite L355",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+               },
+       },
+       {
        .callback = dmi_disable_osi_win7,
        .ident = "ASUS K50IJ",
        .matches = {
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
                },
        },
+       {
+       .callback = dmi_disable_osi_vista,
+       .ident = "Toshiba P305D",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index 5c221ab535d5b0a459516a16034f80a94a7d1bb4..310e3b9749cbbacdabb3c03d288a6b42874aa41b 100644 (file)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
 static int set_power_nocheck(const struct dmi_system_id *id)
 {
        printk(KERN_NOTICE PREFIX "%s detected - "
-               "disable power check in power transistion\n", id->ident);
+               "disable power check in power transition\n", id->ident);
        acpi_power_nocheck = 1;
        return 0;
 }
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
        /*
-        * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+        * Invoke DSDT corruption work-around on all Toshiba Satellite.
         * https://bugzilla.kernel.org/show_bug.cgi?id=14679
         */
        {
         .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite A505",
+        .ident = "TOSHIBA Satellite",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
-               },
-       },
-       {
-        .callback = set_copy_dsdt,
-        .ident = "TOSHIBA Satellite L505D",
-        .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
                },
        },
        {}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
 
        /*
         * If the laptop falls into the DMI check table, the power state check
-        * will be disabled in the course of device power transistion.
+        * will be disabled in the course of device power transition.
         */
        dmi_check_system(power_nocheck_dmi_table);
 
index 8a3b840c0bb268d0580cd5550c41986bf3c09662..d94d2953c9740f34675eeb576e00e74ee621bfd2 100644 (file)
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
 
        acpi_bus_unregister_driver(&acpi_fan_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
        remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
 
        return;
 }
index e9699aaed1092874b0f275d2ca86a8142850db1c..b618f888d66b48f91365e1e4d8ef0b4d67dca81a 100644 (file)
@@ -28,12 +28,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
 }
 
 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
-       {
-       set_no_mwait, "IFL91 board", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-       DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
-       DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
index 15602189238942cc9db03b9d9f25c18823128ea0..347eb21b235302d44d0c5e2d768ce3a8870142f2 100644 (file)
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
                printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                        acpi_idle_driver.name);
        } else {
-               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+               printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
                        cpuidle_get_driver()->name);
        }
 
index ba1bd263d903094692c3683c9557e8b919d1d985..3a73a93596e88a29c1e66fabec91dd2d0db96f6c 100644 (file)
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
        if (!try_module_get(calling_module))
                return -EINVAL;
 
-       /* is_done is set to negative if an error occured,
-        * and to postitive if _no_ error occured, but SMM
+       /* is_done is set to negative if an error occurred,
+        * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
index cf82989ae7568c3c54ded69d16829a2851119d80..4754ff6e70e6daa25b13df6f115745eb7bfb7e55 100644 (file)
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+       acpi_nvs_nosave();
+       return 0;
+}
+
 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        {
        .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VGN-SR11M",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Everex StepNote Series",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
index 68e2e4582fa2f18968058c8125704a23f151cb62..f8588f81048ac989d6af1d727693a234f54bc27b 100644 (file)
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
        ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
 };
 
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
 {
        int result = 0;
        int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
        return result;
 }
 
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
-                 &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level,
-                 &acpi_dbg_level, 0644);
+static struct kernel_param_ops param_ops_debug_layer = {
+       .set = param_set_uint,
+       .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+       .set = param_set_uint,
+       .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
 static char trace_method_name[6];
 module_param_string(trace_method_name, trace_method_name, 6, 0644);
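
The debug.c hunk above is part of the 2.6.36-era move from module_param_call() to module_param_cb(), which bundles the setter and getter into a struct kernel_param_ops. A small stand-alone sketch of that pattern, using invented parameter names rather than anything from this commit:

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>

	static unsigned int my_debug_level;

	/* Custom getter: print the value in hex instead of decimal. */
	static int my_param_get_level(char *buffer, const struct kernel_param *kp)
	{
		return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
	}

	static struct kernel_param_ops my_param_ops_level = {
		.set = param_set_uint,		/* stock uint parser */
		.get = my_param_get_level,	/* custom formatter */
	};

	/* Replaces the older form:
	 * module_param_call(my_debug_level, param_set_uint,
	 *                   my_param_get_level, &my_debug_level, 0644); */
	module_param_cb(my_debug_level, &my_param_ops_level, &my_debug_level, 0644);
	MODULE_PARM_DESC(my_debug_level, "example debug level (hex)");

The parameter still appears under /sys/module/<module>/parameters/ with the same permissions; only the registration API changes.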
index c5fef01b3c9591701fb03ae86290a6b5b643c32a..b836761265988590cea46dbcffc39d30ea3c5578 100644 (file)
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
                if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
-                       printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
-                                       "control misses _BQC function\n");
+                       printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+                               "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
                return AE_CTRL_TERMINATE;
        }
index 013727b20417226249ca942d28708164a497c2dc..99d0e5a511482215a7b3c59ffaed9f0827cfa2b1 100644 (file)
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static struct scsi_host_template ahci_sht = {
+       AHCI_SHT("ahci"),
+};
+
 static struct ata_port_operations ahci_vt8251_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 474427b6f99f47a8dd997042cc9f2d271a743228..e5fdeebf9ef0610d6f43999baefc4fd06fcd354b 100644 (file)
@@ -298,7 +298,17 @@ struct ahci_host_priv {
 
 extern int ahci_ignore_sss;
 
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name)                                             \
+       ATA_NCQ_SHT(drv_name),                                          \
+       .can_queue              = AHCI_MAX_CMDS - 1,                    \
+       .sg_tablesize           = AHCI_MAX_SG,                          \
+       .dma_boundary           = AHCI_DMA_BOUNDARY,                    \
+       .shost_attrs            = ahci_shost_attrs,                     \
+       .sdev_attrs             = ahci_sdev_attrs
+
 extern struct ata_port_operations ahci_ops;
 
 void ahci_save_initial_config(struct device *dev,
index 4e97f33cca4406212b9769c6c13fc11afc02bc4a..84b643270e7af5c49d8a73db78f6e6230ceabf7b 100644 (file)
 #include <linux/ahci_platform.h>
 #include "ahci.h"
 
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT("ahci_platform"),
+};
+
 static int __init ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        ahci_print_info(host, "platform");
 
        rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
-                              &ahci_sht);
+                              &ahci_platform_sht);
        if (rc)
                goto err0;
 
index 3971bc0a4838235ea26f4bff0b97ec7dd3335b69..d712675d0a9657d3cc7742b74220aaf05d0fa537 100644 (file)
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
index 666850d31df2c304b9d1409e21e20355d52f2a65..8eea309ea21231fcb50ff0498a366ff8a8a1dcf7 100644 (file)
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
                   ahci_read_em_buffer, ahci_store_em_buffer);
 
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_link_power_management_policy,
        &dev_attr_em_message_type,
        &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_em_buffer,
        NULL
 };
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
 
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        &dev_attr_unload_heads,
        NULL
 };
-
-struct scsi_host_template ahci_sht = {
-       ATA_NCQ_SHT("ahci"),
-       .can_queue              = AHCI_MAX_CMDS - 1,
-       .sg_tablesize           = AHCI_MAX_SG,
-       .dma_boundary           = AHCI_DMA_BOUNDARY,
-       .shost_attrs            = ahci_shost_attrs,
-       .sdev_attrs             = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
 
 struct ata_port_operations ahci_ops = {
        .inherits               = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
        /* issue the first D2H Register FIS */
        msecs = 0;
        now = jiffies;
-       if (time_after(now, deadline))
+       if (time_after(deadline, now))
                msecs = jiffies_to_msecs(deadline - now);
 
        tf.ctl |= ATA_SRST;
index c035b3d041ee1572492d7a17f56323bde0fccacd..932eaee5024527f4e617064726e39db808c17e87 100644 (file)
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
+       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+       /*
+        * On some hardware, device fails to respond after spun down
+        * for suspend.  As the device won't be used before being
+        * resumed, we don't need to touch the device.  Ask EH to skip
+        * the usual stuff and proceed directly to suspend.
+        *
+        * http://thread.gmane.org/gmane.linux.ide/46764
+        */
+       if (mesg.event == PM_EVENT_SUSPEND)
+               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index c9ae299b83428d039c13cce845e7acc497bcff13..e48302eae55fcd2ad619564c9cd8237c722efb29 100644 (file)
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index 3b82d8ef76f0ffc81e6bb62e9d3ea19d1be57195..e30c537cce32f99ab7fcb07f7f124cd920c4cd4b 100644 (file)
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                if (ioaddr->ctl_addr)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
+               ata_wait_idle(ap);
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                iowrite8(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }
+
+       ata_wait_idle(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                     u8 status, int in_wq)
 {
-       struct ata_eh_info *ehi = &ap->link.eh_info;
+       struct ata_link *link = qc->dev->link;
+       struct ata_eh_info *ehi = &link->eh_info;
        unsigned long flags = 0;
        int poll_next;
 
@@ -1298,8 +1302,14 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
 {
+       struct ata_port *ap = link->ap;
+
+       WARN_ON((ap->sff_pio_task_link != NULL) &&
+               (ap->sff_pio_task_link != link));
+       ap->sff_pio_task_link = link;
+
        /* may fail if ata_sff_flush_pio_task() in progress */
        queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
                           msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
 {
        struct ata_port *ap =
                container_of(work, struct ata_port, sff_pio_task.work);
+       struct ata_link *link = ap->sff_pio_task_link;
        struct ata_queued_cmd *qc;
        u8 status;
        int poll_next;
 
+       BUG_ON(ap->sff_pio_task_link == NULL); 
        /* qc can be NULL if timeout occurred */
-       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (!qc)
+       qc = ata_qc_from_tag(ap, link->active_tag);
+       if (!qc) {
+               ap->sff_pio_task_link = NULL;
                return;
+       }
 
 fsm_start:
        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
                msleep(2);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+                       ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
                        return;
                }
        }
 
+       /*
+        * hsm_move() may trigger another command to be processed.
+        * clean the link beforehand.
+        */
+       ap->sff_pio_task_link = NULL;
        /* move the HSM */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1376,6 +1395,7 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* Use polling pio if the LLD doesn't handle
         * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                ap->hsm_task_state = HSM_ST_LAST;
 
                if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                break;
 
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* PIO data out protocol */
                        ap->hsm_task_state = HSM_ST_FIRST;
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                        /* always send first data block using the
                         * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                        ap->hsm_task_state = HSM_ST;
 
                        if (qc->tf.flags & ATA_TFLAG_POLLING)
-                               ata_sff_queue_pio_task(ap, 0);
+                               ata_sff_queue_pio_task(link, 0);
 
                        /* if polling, ata_sff_pio_task() handles the
                         * rest.  otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                /* send cdb by polling if no cdb interrupt */
                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
                    (qc->tf.flags & ATA_TFLAG_POLLING))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 
                /* send cdb by polling if no cdb interrupt */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
index ba43f0f8c880a21aa1b8b1d3b9ae3d8c3487d415..2215632e4b317684a818b0ee5cbb7037fe064079 100644 (file)
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
        /* Odd numbered device ids are the units with enable bits (the -R cards) */
-       if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+       if ((pdev->device & 1) &&
+           !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
                return -ENOENT;
 
        return ata_sff_prereset(link, deadline);
index 5e659885de162e7ef3cf38e3e1f8a67098108712..ac8d7d97e4085d4122b33fbd58bde6951f3ef2e7 100644 (file)
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                        tf->lbam,
                        tf->lbah);
        }
+
+       ata_wait_idle(ap);
 }
 
 static int via_port_start(struct ata_port *ap)
index 81982594a014b603aa2ebd8b5944ed13049c929b..a9fd9709c2627c7514fe74a8ec9ebc26f09b44f9 100644 (file)
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
        }
 
        if (qc->tf.flags & ATA_TFLAG_POLLING)
-               ata_sff_queue_pio_task(ap, 0);
+               ata_sff_queue_pio_task(link, 0);
        return 0;
 }
 
index 5419a49ff135121ce608004f40a211a9587b5cf5..276d5a701dc37cfbebc828a88efc824abc163774 100644 (file)
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
 {
        dev->power.status = DPM_ON;
        init_completion(&dev->power.completion);
+       complete_all(&dev->power.completion);
        dev->power.wakeup_count = 0;
        pm_runtime_init(dev);
 }
index 31064df1370a96320f548d1ce5f191eaf2e97051..5e4fadcdece979b76571462210f60195a6531f27 100644 (file)
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
+       if (h->Qdepth > h->maxQsinceinit)
+               h->maxQsinceinit = h->Qdepth;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
 }
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        h->scatter_list = kmalloc(h->max_commands *
                                                sizeof(struct scatterlist *),
                                                GFP_KERNEL);
+       if (!h->scatter_list)
+               goto clean4;
+
        for (k = 0; k < h->nr_cmds; k++) {
                h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
                                                        h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 clean4:
        kfree(h->cmd_pool_bits);
        /* Free up sg elements */
-       for (k = 0; k < h->nr_cmds; k++)
+       for (k-- ; k >= 0; k--)
                kfree(h->scatter_list[k]);
        kfree(h->scatter_list);
        cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
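
The clean4 change above makes the error path free only the scatterlist entries that were actually allocated (k counts down from the failing index instead of walking all nr_cmds slots). The same unwind pattern in isolation, with an illustrative helper that is not part of the driver:

	#include <linux/slab.h>

	/* Allocate n entries; on failure, free only the 0..k-1 entries that
	 * were successfully allocated, then report -ENOMEM. */
	static int alloc_entries(void **entries, int n)
	{
		int k;

		for (k = 0; k < n; k++) {
			entries[k] = kmalloc(64, GFP_KERNEL);
			if (!entries[k])
				goto unwind;
		}
		return 0;

	unwind:
		for (k--; k >= 0; k--)
			kfree(entries[k]);
		return -ENOMEM;
	}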
index f3c636d237187df21879c0e8acc4a4c943e59ad2..91797bbbe702f01afc5770f45618487ab21054da 100644 (file)
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+               bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index b82c5ce5e9dfaf3bcc1d81b68554de8cdff0493c..76fa3deaee84059d4431f7763981311210f16f06 100644 (file)
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
        host->breq->queuedata = host;
 
        /* mflash is random device, thanx for the noop */
-       elevator_exit(host->breq->elevator);
-       err = elevator_init(host->breq, "noop");
+       err = elevator_change(host->breq, "noop");
        if (err) {
                printk(KERN_ERR "%s:%d (elevator_init) fail\n",
                                __func__, __LINE__);
index b1cbeb59bb7622e61f75bc58f373cadf1e822218..37a2bb5950761fcfa6cd5ea16f30b5c74dc51a1e 100644 (file)
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
        pkt_shrink_pktlist(pd);
 }
 
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
 {
        if (dev_minor >= MAX_WRITERS)
                return NULL;
index eab58db5f91cd9cfd6ffa5302761780ac8b3f373..cd18493c952795317904229197016e40d7c408cd 100644 (file)
@@ -806,6 +806,8 @@ static const struct intel_driver_description {
            "G45/G43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
            "B43", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+           "B43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
            "G41", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
index ee189c74d345ea98062ddbb914c87cb206691c59..d09b1ab7e8abeac5bbdf0cc8a8dd5f333f9c22a3 100644 (file)
 #define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
 #define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
 #define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB        0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG        0x2E92
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
 #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
index 3822b4f49c84a360145a085e329fe0b76df4d47c..7bd7c45b53efc849225f7f0a604c41f2f0a4fded 100644 (file)
@@ -305,6 +305,9 @@ static int num_force_kipmid;
 #ifdef CONFIG_PCI
 static int pci_registered;
 #endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
 #ifdef CONFIG_PPC_OF
 static int of_registered;
 #endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 {
        struct acpi_device *acpi_dev;
        struct smi_info *info;
-       struct resource *res;
+       struct resource *res, *res_second;
        acpi_handle handle;
        acpi_status status;
        unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
        info->io.addr_data = res->start;
 
        info->io.regspacing = DEFAULT_REGSPACING;
-       res = pnp_get_resource(dev,
+       res_second = pnp_get_resource(dev,
                               (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
                                        IORESOURCE_IO : IORESOURCE_MEM,
                               1);
-       if (res) {
-               if (res->start > info->io.addr_data)
-                       info->io.regspacing = res->start - info->io.addr_data;
+       if (res_second) {
+               if (res_second->start > info->io.addr_data)
+                       info->io.regspacing = res_second->start - info->io.addr_data;
        }
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
 
 #ifdef CONFIG_ACPI
        pnp_register_driver(&ipmi_pnp_driver);
+       pnp_registered = 1;
 #endif
 
 #ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
                pci_unregister_driver(&ipmi_pci_driver);
 #endif
 #ifdef CONFIG_ACPI
-       pnp_unregister_driver(&ipmi_pnp_driver);
+       if (pnp_registered)
+               pnp_unregister_driver(&ipmi_pnp_driver);
 #endif
 
 #ifdef CONFIG_PPC_OF
index a398ecdbd758058104e81223f1cbdfe7057e9a4b..1f528fad3516827754270806a4c6afdee505c9d0 100644 (file)
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
 /*
  * capabilities for /dev/zero
  * - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
  */
 static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
-       .capabilities   = BDI_CAP_MAP_COPY,
+       .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static const struct file_operations full_fops = {
index 942a9826bd23ed64b83095fcdeb65ed059c9ce02..c810481a5bc23ae3ca57127729c83e454d681534 100644 (file)
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
        ssize_t ret;
        bool nonblock;
 
+       /* Userspace could be out to fool us */
+       if (!count)
+               return 0;
+
        port = filp->private_data;
 
        nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
        poll_wait(filp, &port->waitqueue, wait);
 
        ret = 0;
-       if (port->inbuf)
+       if (!will_read_block(port))
                ret |= POLLIN | POLLRDNORM;
        if (!will_write_block(port))
                ret |= POLLOUT;
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic, it used to be
+                * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+                * and 2.6.36, which was a minor but unfortunate ABI
+                * change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
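
The KIOCSOUND/KDMKTONE argument is a PIT divisor, so the change above only swaps the constant used for the conversion: on x86, where PIT_TICK_RATE is 1193182, a request for a 440 Hz tone becomes arg = 1193182 / 440 = 2711 after integer division, which the beep driver ultimately programs into the timer. Using PIT_TICK_RATE keeps that historic ABI value stable even on configurations where CLOCK_TICK_RATE differs.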
index c2408bbe9c2eed3521f4eb21a86c1e9671774534..f508690eb95859ef80e217f68db827daba606f29 100644 (file)
@@ -80,7 +80,7 @@
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
  * and in addition, less performance has a power price of its own.
  *
  * As a general rule of thumb, menu assumes that the following heuristic
index 86c5ae9fde34d3cf0f3f372a33739ccb4286fdf3..411d5bf50fc43cab437dff34d3d9f25dd9928fe0 100644 (file)
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-       u32 val = (1 << (1 + (chan->idx * 16)));
+       u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
        __raw_writel(val, XOR_INTR_CAUSE(chan));
 }
index fb64cf36ba61d0e786ecfeb802f43909ade4f2f2..eb6b54dbb8064a9a5d2e71eb3261132195ff6f8d 100644 (file)
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 
        sh_chan = to_sh_chan(chan);
        param = chan->private;
-       slave_addr = param->config->addr;
 
        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
                return NULL;
        }
 
+       slave_addr = param->config->addr;
+
        /*
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
index 3630308e7b811a66f398193eae71cc12f5d7383f..6b21e25f7a84cc99ad6ea710b788745992d1170f 100644 (file)
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
 {
        int status;
 
+       if (mci->op_state != OP_RUNNING_POLL)
+               return;
+
        status = cancel_delayed_work(&mci->work);
        if (status == 0) {
                debugf0("%s() not canceled, flush the queue\n",
index e0187d16dd7c53fd240b58d62005e4c17df14bc4..0fd5b85a0f756745bd1074ae673e89d6c81a237a 100644 (file)
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
        ATTR_COUNTER(0),
        ATTR_COUNTER(1),
        ATTR_COUNTER(2),
+       { .attr = { .name = NULL } }
 };
 
 static struct mcidev_sysfs_group i7core_udimm_counters = {
index be29b0bb247101442a63fa623467e98ad7b77524..1b05896648bce47cff20ed04acbf5d1ee6175a8b 100644 (file)
@@ -263,6 +263,7 @@ static const struct {
        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
        {PCI_VENDOR_ID_NEC,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
        {PCI_VENDOR_ID_VIA,     PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
+       {PCI_VENDOR_ID_RICOH,   PCI_ANY_ID,     QUIRK_CYCLE_TIMER},
        {PCI_VENDOR_ID_APPLE,   PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
 };
 
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
index 55d03ed050006c3de3a2b7470c4ededeafbc8871..529a0dbe9fc65960e62bc7840320754fb61393e8 100644 (file)
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
  *   user_data: A pointer the data that is copied to the buffer.
  *   size: The Number of bytes to copy.
  */
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
-               void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+                             void __user *user_data, int size)
 {
        int nr_pages = size / PAGE_SIZE + 1;
        int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
 {
        int idx = drm_buffer_index(buf);
        int page = drm_buffer_page(buf);
-       void *obj = 0;
+       void *obj = NULL;
 
        if (idx + objsize <= PAGE_SIZE) {
                obj = &buf->data[page][idx];
index d2ab01e90a96315fee72015f4a76f194211d4a81..dcbeb98f195a7addf665e0134af9ee799280a279 100644 (file)
@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                if (connector->funcs->force)
                        connector->funcs->force(connector);
        } else {
-               connector->status = connector->funcs->detect(connector);
-               drm_helper_hpd_irq_event(dev);
+               connector->status = connector->funcs->detect(connector, true);
+               drm_kms_helper_poll_enable(dev);
        }
 
        if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
 
        if (mode_changed) {
-               old_fb = set->crtc->fb;
-               set->crtc->fb = set->fb;
                set->crtc->enabled = (set->mode != NULL);
                if (set->mode != NULL) {
                        DRM_DEBUG_KMS("attempting to set mode from"
                                        " userspace\n");
                        drm_mode_debug_printmodeline(set->mode);
+                       old_fb = set->crtc->fb;
+                       set->crtc->fb = set->fb;
                        if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
                                                      set->x, set->y,
                                                      old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
                    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
                        continue;
 
-               status = connector->funcs->detect(connector);
+               status = connector->funcs->detect(connector, false);
                if (old_status != status)
                        changed = true;
        }
index bf92d07510df740d856c173e7dcce292659fe6f7..5663d2719063de9231ca6cc153b63b30422e17aa 100644 (file)
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
                return -ENOMEM;
 
        kref_init(&obj->refcount);
-       kref_init(&obj->handlecount);
+       atomic_set(&obj->handle_count, 0);
        obj->size = size;
 
        atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-       struct drm_device *dev = obj->dev;
-
-       if (dev->driver->gem_free_object_unlocked != NULL)
-               dev->driver->gem_free_object_unlocked(obj);
-       else if (dev->driver->gem_free_object != NULL) {
-               mutex_lock(&dev->struct_mutex);
-               dev->driver->gem_free_object(obj);
-               mutex_unlock(&dev->struct_mutex);
-       }
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
        BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-       struct drm_gem_object *obj = container_of(kref,
-                                                 struct drm_gem_object,
-                                                 handlecount);
        struct drm_device *dev = obj->dev;
 
        /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
        struct drm_gem_object *obj = vma->vm_private_data;
 
        drm_gem_object_reference(obj);
+
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
 
-       drm_gem_object_unreference_unlocked(obj);
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_close_locked(vma);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
index 2ef2c78272434dcb6b32dc17fd96e34dd7a8d959..974e970ce3f81ce014170b90ad1b8adc8a1dd5a9 100644 (file)
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
-                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->handle_count),
                   atomic_read(&obj->refcount.refcount));
        return 0;
 }
index e20f78b542a756644a29693c8da5927dd72f5a7d..f5bd9e590c801b50b0d3629ddbadd6c54a69923a 100644 (file)
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        dev->hose = pdev->sysdata;
 #endif
 
+       mutex_lock(&drm_global_mutex);
+
        if ((ret = drm_fill_in_dev(dev, ent, driver))) {
                printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
                goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, pci_name(pdev), dev->primary->index);
 
+       mutex_unlock(&drm_global_mutex);
        return 0;
 
 err_g4:
@@ -210,6 +213,7 @@ err_g2:
        pci_disable_device(pdev);
 err_g1:
        kfree(dev);
+       mutex_unlock(&drm_global_mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
index 460e9a3afa8d4bd43ac752cbf2e1bae3e0f2b832..92d1d0fb7b7581821756ebcdf01089f55037198c 100644 (file)
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
        dev->platformdev = platdev;
        dev->dev = &platdev->dev;
 
+       mutex_lock(&drm_global_mutex);
+
        ret = drm_fill_in_dev(dev, NULL, driver);
 
        if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
 
        list_add_tail(&dev->driver_item, &driver->device_list);
 
+       mutex_unlock(&drm_global_mutex);
+
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
                drm_put_minor(&dev->control);
 err_g1:
        kfree(dev);
+       mutex_unlock(&drm_global_mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_get_platform_dev);
index 86118a742231b42fd711f18b64deb182a8d60d3a..85da4c40694cc8a99c2b3224e31af548b76e198a 100644 (file)
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
        struct drm_connector *connector = to_drm_connector(device);
        enum drm_connector_status status;
 
-       status = connector->funcs->detect(connector);
+       status = connector->funcs->detect(connector, true);
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        drm_get_connector_status_name(status));
 }
index fda67468e603b6169393b92bc4922afef8b4d8ce..5df450683aab8649511aaa96aaa759452b022fc0 100644 (file)
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
        mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
 {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);
 
-       mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
                        break;
                }
        }
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
 }
 
index 61b4caf220fa83bd15815ea0f82b627f2d773727..fb07e73581e84467ab59ebe744e21ff6f712d2ce 100644 (file)
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i810_ioctl,
        .mmap = i810_mmap_buffers,
        .fasync = drm_fasync,
 };
index 671aa18415ac52d17164e79b4c2a9f287b02da0d..cc92c7e6236fbdffb86078290b5b93dffad2cbad 100644 (file)
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
        .open = drm_open,
        .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
+       .unlocked_ioctl = i830_ioctl,
        .mmap = i830_mmap_buffers,
        .fasync = drm_fasync,
 };
index 9d67b485303005771a090ea7a3c1e1c6f8b74e9e..c74e4e8006d4b28e02ae9fb44ee11f491e130775 100644 (file)
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
                }
        }
 
-       div_u64(diff, diff1);
+       diff = div_u64(diff, diff1);
        ret = ((m * diff) + c);
-       div_u64(ret, 10);
+       ret = div_u64(ret, 10);
 
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
        /* More magic constants... */
        diff = diff * 1181;
-       div_u64(diff, diffms * 10);
+       diff = div_u64(diff, diffms * 10);
        dev_priv->gfx_power = diff;
 }
 
index 216deb579785eb93e27e2ba57a0556471a13daf2..6dbe14cc4f7474aa57221c46fa59286caf362b9f 100644 (file)
@@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = {           /* aka */
        INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
        INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
        INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
+       INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),              /* B43_G.1 */
        INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
        INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
index 16fca1d1799a4211474a91e7fc52b605eceafbfc..90b1d6753b9d493d3ed8d2c45153bf2047b54d8f 100644 (file)
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
                return ret;
        }
 
-       /* Sink the floating reference from kref_init(handlecount) */
-       drm_gem_object_handle_unreference_unlocked(obj);
-
        args->handle = handle;
        return 0;
 }
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check source.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check source.  */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_WRITE,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                                                        file_priv);
        }
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
 
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
-       if (!access_ok(VERIFY_READ, user_data, remain))
-               return -EFAULT;
 
 
        mutex_lock(&dev->struct_mutex);
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
 
-       /* Bounds check destination.
-        *
-        * XXX: This could use review for overflow issues...
-        */
-       if (args->offset > obj->size || args->size > obj->size ||
-           args->offset + args->size > obj->size) {
-               drm_gem_object_unreference_unlocked(obj);
-               return -EINVAL;
+       /* Bounds check destination. */
+       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (!access_ok(VERIFY_READ,
+                      (char __user *)(uintptr_t)args->data_ptr,
+                      args->size)) {
+               ret = -EFAULT;
+               goto err;
        }
 
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
+err:
        drm_gem_object_unreference_unlocked(obj);
-
        return ret;
 }
 
@@ -2351,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
        reg->obj = obj;
 
-       if (IS_GEN6(dev))
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                sandybridge_write_fence_reg(reg);
-       else if (IS_I965G(dev))
+               break;
+       case 5:
+       case 4:
                i965_write_fence_reg(reg);
-       else if (IS_I9XX(dev))
+               break;
+       case 3:
                i915_write_fence_reg(reg);
-       else
+               break;
+       case 2:
                i830_write_fence_reg(reg);
+               break;
+       }
 
        trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
                        obj_priv->tiling_mode);
@@ -2381,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg =
                &dev_priv->fence_regs[obj_priv->fence_reg];
+       uint32_t fence_reg;
 
-       if (IS_GEN6(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
                             (obj_priv->fence_reg * 8), 0);
-       } else if (IS_I965G(dev)) {
+               break;
+       case 5:
+       case 4:
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-       } else {
-               uint32_t fence_reg;
-
-               if (obj_priv->fence_reg < 8)
-                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+               break;
+       case 3:
+               if (obj_priv->fence_reg >= 8)
+                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
-                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
-                                                      8) * 4;
+       case 2:
+                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
 
                I915_WRITE(fence_reg, 0);
+               break;
        }
 
        reg->obj = NULL;
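
The case 2: label embedded in the else branch above is deliberate: on gen 3, fence registers 8 and up live in the 945 bank while the lower ones use the 830 bank, and gen 2 always uses the 830 bank, so the nested label lets both of those cases fall into the same assignment (the i915_save_state/i915_restore_state hunks further down rely on the same kind of intentional fallthrough). Flattened, the selection reads:

	/* equivalent to the nested case label, written out for readability */
	if (gen == 3 && obj_priv->fence_reg >= 8)
		fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
	else	/* gen 3 with a low register, or gen 2 */
		fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
	I915_WRITE(fence_reg, 0);
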
@@ -3247,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                  (int) reloc->offset,
                                  reloc->read_domains,
                                  reloc->write_domain);
+                       drm_gem_object_unreference(target_obj);
+                       i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
index 72cae3cccad8802641d973542ab6c440445b81db..5c428fa3e0b34049e94786184b646a98ee87c06d 100644 (file)
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
           struct list_head *unwind)
 {
        list_add(&obj_priv->evict_list, unwind);
+       drm_gem_object_reference(&obj_priv->base);
        return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
-       struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+       struct drm_i915_gem_object *obj_priv;
        struct list_head *render_iter, *bsd_iter;
        int ret = 0;
 
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        list_for_each_entry(obj_priv, &unwind_list, evict_list) {
                ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
                BUG_ON(ret);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        return -ENOSPC;
 
 found:
+       /* drm_mm doesn't allow any other other operations while
+        * scanning, therefore store to be evicted objects on a
+        * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &unwind_list, evict_list) {
+       while (!list_empty(&unwind_list)) {
+               obj_priv = list_first_entry(&unwind_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
                if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-                       /* drm_mm doesn't allow any other other operations while
-                        * scanning, therefore store to be evicted objects on a
-                        * temporary list. */
                        list_move(&obj_priv->evict_list, &eviction_list);
+                       continue;
                }
+               list_del(&obj_priv->evict_list);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
        /* Unbinding will emit any required flushes */
-       list_for_each_entry_safe(obj_priv, tmp_obj_priv,
-                                &eviction_list, evict_list) {
-#if WATCH_LRU
-               DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-               ret = i915_gem_object_unbind(&obj_priv->base);
-               if (ret)
-                       return ret;
+       while (!list_empty(&eviction_list)) {
+               obj_priv = list_first_entry(&eviction_list,
+                                           struct drm_i915_gem_object,
+                                           evict_list);
+               if (ret == 0)
+                       ret = i915_gem_object_unbind(&obj_priv->base);
+               list_del(&obj_priv->evict_list);
+               drm_gem_object_unreference(&obj_priv->base);
        }
 
-       /* The just created free hole should be on the top of the free stack
-        * maintained by drm_mm, so this BUG_ON actually executes in O(1).
-        * Furthermore all accessed data has just recently been used, so it
-        * should be really fast, too. */
-       BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
-                                  alignment, 0));
-
-       return 0;
+       return ret;
 }
 
 int
index 59457e83b011aa3bbac119faf6836a66df7affa5..744225ebb4b25d5988fab454441de95d7db94115 100644 (file)
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
                i915_seqno_passed(i915_get_gem_seqno(dev,
                                &dev_priv->render_ring),
                        i915_get_tail_request(dev)->seqno)) {
+               bool missed_wakeup = false;
+
                dev_priv->hangcheck_count = 0;
 
                /* Issue a wake-up to catch stuck h/w. */
-               if (dev_priv->render_ring.waiting_gem_seqno |
-                   dev_priv->bsd_ring.waiting_gem_seqno) {
-                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-                       if (dev_priv->render_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       if (dev_priv->bsd_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+               if (dev_priv->render_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+
+               if (dev_priv->bsd_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
                }
+
+               if (missed_wakeup)
+                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
                return;
        }
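
Two things change in the hangcheck path above: the bitwise | between the two waiting_gem_seqno fields becomes a proper per-ring test, and the wake-up (plus the "missed IRQ" error) is only issued when waitqueue_active() confirms somebody is actually sleeping on that ring's queue, so an idle ring no longer triggers spurious error spam. The conditional-wakeup idiom on its own, outside any driver context (DRM_WAKEUP() is a thin wrapper around wake_up()):

	#include <linux/wait.h>

	/* wake a queue only when a sleeper can actually observe the wakeup */
	static bool kick_waiters(wait_queue_head_t *q, bool expecting_irq)
	{
		if (expecting_irq && waitqueue_active(q)) {
			wake_up(q);
			return true;	/* caller may now log a missed interrupt */
		}
		return false;
	}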
 
index d094e91292234507c82ea57cb57947a019318aa6..4f5e15577e89e3e6f7005cd92f9f87ab0636b4eb 100644 (file)
 #define  WM1_LP_SR_EN          (1<<31)
 #define  WM1_LP_LATENCY_SHIFT  24
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
+#define  WM1_LP_FBC_LP1_MASK   (0xf<<20)
+#define  WM1_LP_FBC_LP1_SHIFT  20
 #define  WM1_LP_SR_MASK                (0x1ff<<8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0x3f)
+#define WM2_LP_ILK             0x4510c
+#define  WM2_LP_EN             (1<<31)
+#define WM3_LP_ILK             0x45110
+#define  WM3_LP_EN             (1<<31)
+#define WM1S_LP_ILK            0x45120
+#define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define MLTR_ILK               0x11222
index 2c6b98f2440eff4fdee79ba3c7c95a5bbdf07c4a..31f08581e93a46dbdc2ca1cfa563faec31e9ffc5 100644 (file)
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
                dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
        /* Fences */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-       } else {
-               for (i = 0; i < 8; i++)
-                       dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
+               break;
+       case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+       case 2:
+               for (i = 0; i < 8; i++)
+                       dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+               break;
+
        }
 
        return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
        /* Fences */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
-       } else {
-               for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+               break;
+       case 3:
+       case 2:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+               break;
        }
 
        i915_restore_display(dev);
index 4b7735196cd5a516eb3bb34b9a843450da3f1fe4..197d4f32585a59b5b336328b470fa038bc922781 100644 (file)
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
        if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
                     1000, 1))
-               DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+               DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
        if (turn_off_dac) {
                I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
                              CRT_HOTPLUG_FORCE_DETECT) == 0,
                             1000, 1))
-                       DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+                       DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
        }
 
        stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        return status;
 }
 
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
        if (intel_crt_detect_ddc(encoder))
                return connector_status_connected;
 
+       if (!force)
+               return connector->status;
+
        /* for pre-945g platforms use load detect */
        if (encoder->crtc && encoder->crtc->enabled) {
                status = intel_crt_load_detect(encoder->crtc, intel_encoder);
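
Every ->detect() hook in this diff (the i915 connectors here, and the nouveau and radeon ones below) gains a force flag: the idea is that routine polling passes false, so a connector may skip intrusive probes such as load detection or forced hotplug triggers and simply report its cached connector->status, while an explicit probe passes true. A minimal callback following that contract, with hypothetical helper names:

	static enum drm_connector_status
	example_detect(struct drm_connector *connector, bool force)
	{
		if (example_probe_ddc(connector))	/* cheap EDID probe: always allowed */
			return connector_status_connected;

		if (!force)				/* polled path: do not disturb the hardware */
			return connector->status;

		return example_load_detect(connector);	/* expensive probe only when forced */
	}
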
index 40cc5da264a9bdf5520909ba39ca2cb21c1ca5ca..979228594599a28ac7737762679f1c97fd5981bf 100644 (file)
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
  * @pipe: pipe to wait for
  *
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  * spinning on the vblank interrupt status bit, since we won't actually
  * see an interrupt when the pipe is disabled.
  *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
+ *  
  */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
-       unsigned long timeout = jiffies + msecs_to_jiffies(100);
-       u32 last_line;
-
-       /* Wait for the display line to settle */
-       do {
-               last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
-               mdelay(5);
-       } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
-                time_after(timeout, jiffies));
-
-       if (time_after(jiffies, timeout))
-               DRM_DEBUG_KMS("vblank wait timed out\n");
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+
+               /* Wait for the Pipe State to go off */
+               if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+                            100, 0))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       } else {
+               u32 last_line;
+               int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+               unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+               /* Wait for the display line to settle */
+               do {
+                       last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+                       mdelay(5);
+               } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+                        time_after(timeout, jiffies));
+               if (time_after(jiffies, timeout))
+                       DRM_DEBUG_KMS("pipe_off wait timed out\n");
+       }
 }
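
On gen 4 and newer the function above polls the pipe's ACTIVE status bit with the driver's wait_for() helper; older parts have no such bit, so it falls back to watching the scanline counter settle, since a disabled pipe raises no vblank interrupts to wait on. A generic poll-with-timeout helper in the same spirit (simplified, not the driver's macro):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	/* poll until (readl(reg) & mask) == 0 or the timeout expires */
	static int wait_for_clear(void __iomem *reg, u32 mask, unsigned int timeout_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

		while (readl(reg) & mask) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			msleep(1);
		}
		return 0;
	}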
 
 /* Parameters have changed, update FBC info */
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(dspbase_reg);
                }
 
-               /* Wait for vblank for the disable to take effect */
-               intel_wait_for_vblank_off(dev, pipe);
-
                /* Don't disable pipe A or pipe A PLLs if needed */
                if (pipeconf_reg == PIPEACONF &&
-                   (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+                   (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev, pipe);
                        goto skip_pipe_off;
+               }
 
                /* Next, disable display pipes */
                temp = I915_READ(pipeconf_reg);
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(pipeconf_reg);
                }
 
-               /* Wait for vblank for the disable to take effect. */
-               intel_wait_for_vblank_off(dev, pipe);
+               /* Wait for the pipe to turn off */
+               intel_wait_for_pipe_off(dev, pipe);
 
                temp = I915_READ(dpll_reg);
                if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2463,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
                                  struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = crtc->dev;
+
        if (HAS_PCH_SPLIT(dev)) {
                /* FDI link clock is fixed at 2.7G */
                if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
                        return false;
        }
+
+       /* XXX some encoders set the crtcinfo, others don't.
+        * Obviously we need some form of conflict resolution here...
+        */
+       if (adjusted_mode->crtc_htotal == 0)
+               drm_mode_set_crtcinfo(adjusted_mode, 0);
+
        return true;
 }
 
@@ -2767,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
-       if (wm_size <= 0) {
+       if (wm_size <= 0)
                wm_size = wm->default_wm;
-               DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
-                         " entries required = %ld, available = %lu.\n",
-                         entries_required + wm->guard_size,
-                         wm->fifo_size);
-       }
-
        return wm_size;
 }
 
@@ -3388,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                reg_value = I915_READ(WM1_LP_ILK);
                reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
                               WM1_LP_CURSOR_MASK);
-               reg_value |= WM1_LP_SR_EN |
-                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+               reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
                             (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
 
                I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                        I915_WRITE(DISP_ARB_CTL,
                                        (I915_READ(DISP_ARB_CTL) |
                                                DISP_FBC_WM_DIS));
+               I915_WRITE(WM3_LP_ILK, 0);
+               I915_WRITE(WM2_LP_ILK, 0);
+               I915_WRITE(WM1_LP_ILK, 0);
                }
                /*
                 * Based on the document from hardware guys the following bits
@@ -5696,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev)
                                   ILK_DPFC_DIS2 |
                                   ILK_CLK_FBC);
                }
-               if (IS_GEN6(dev))
-                       return;
+               return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
                I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                                OUT_RING(MI_FLUSH);
                                ADVANCE_LP_RING();
                        }
-               } else {
+               } else
                        DRM_DEBUG_KMS("Failed to allocate render context."
-                                     "Disable RC6\n");
-                       return;
-               }
+                                      "Disable RC6\n");
        }
 
        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
index 51d142939a26e9abe76fdfe8a90aa94ff0f3b612..9ab8708ac6ba1370cea75680d6a660daa5f9b147 100644 (file)
@@ -1138,18 +1138,14 @@ static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
                        uint8_t dp_train_pat,
-                       uint8_t train_set[4],
-                       bool first)
+                       uint8_t train_set[4])
 {
        struct drm_device *dev = intel_dp->base.enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
        int ret;
 
        I915_WRITE(intel_dp->output_reg, dp_reg_value);
        POSTING_READ(intel_dp->output_reg);
-       if (first)
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        intel_dp_aux_native_write_1(intel_dp,
                                    DP_TRAINING_PATTERN_SET,
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
        uint8_t voltage;
        bool clock_recovery = false;
        bool channel_eq = false;
-       bool first = true;
        int tries;
        u32 reg;
        uint32_t DP = intel_dp->DP;
+       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+       /* Enable output, wait for it to become active */
+       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+       POSTING_READ(intel_dp->output_reg);
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
 
        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
                        reg = DP | DP_LINK_TRAIN_PAT_1;
 
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_1, train_set, first))
+                                            DP_TRAINING_PATTERN_1, train_set))
                        break;
-               first = false;
                /* Set training pattern 1 */
 
                udelay(100);
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
 
                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_2, train_set,
-                                            false))
+                                            DP_TRAINING_PATTERN_2, train_set))
                        break;
 
                udelay(400);
@@ -1386,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector)
  * \return false if DP port is disconnected.
  */
 static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
index ad312ca6b3e570125732168b3c2f670467264beb..8828b3ac6414eabff93134e34a41ae5c38d1cd34 100644 (file)
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
index a399f4b2c1c526cdfd64aa2d476f6b97c3fd69d2..7c9ec1472d46ab3cbb08f6bffc8257af952a64b0 100644 (file)
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
  *
  * Unimplemented.
  */
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
index 7bdc96256bf55b6e87d102377b428a871792be91..56ad9df2ccb58925bdd74a2f3b64bac01360f562 100644 (file)
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
        drm_fb_helper_fini(&ifbdev->helper);
 
        drm_framebuffer_cleanup(&ifb->base);
-       if (ifb->obj)
+       if (ifb->obj) {
+               drm_gem_object_handle_unreference(ifb->obj);
                drm_gem_object_unreference(ifb->obj);
+       }
 
        return 0;
 }
index ccd4c97e652492f19d85abf532d56aafcab6968a..926934a482ec085c63256567e27f0309b51b24cf 100644 (file)
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 }
 
 static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
index b819c10811470775b19826e407ba75fffd6a9b68..6ec39a86ed06d2bd6e716611f3ab4d384d950636 100644 (file)
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
  * connected and closed means disconnected.  We also send hotplug events as
  * needed, using lid status notification from the input layer.
  */
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * the LID nofication event.
         */
        if (connector)
-               connector->status = connector->funcs->detect(connector);
+               connector->status = connector->funcs->detect(connector,
+                                                            false);
+
        /* Don't force modeset on machines where it causes a GPU lockup */
        if (dmi_check_system(intel_no_modeset_on_lid))
                return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
        intel_encoder->crtc_mask = (1 << 1);
-       if (IS_I965G(dev))
-               intel_encoder->crtc_mask |= (1 << 0);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
index e3b7a7ee39cb97b390048c1a5ce9187b94b6e51d..ee73e428a84a800dd8d70a1747457033f7e8da8e 100644 (file)
@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
        if (!analog_connector)
                return false;
 
-       if (analog_connector->funcs->detect(analog_connector) ==
+       if (analog_connector->funcs->detect(analog_connector, false) ==
                        connector_status_disconnected)
                return false;
 
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
        return status;
 }
 
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
        uint16_t response;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
         return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2242,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
-       intel_sdvo_destroy_enhance_property(connector);
-       kfree(intel_sdvo_connector);
+       intel_sdvo_destroy(connector);
        return false;
 }
 
@@ -2521,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                uint16_t response;
        } enhancements;
 
-       if (!intel_sdvo_get_value(intel_sdvo,
-                                 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-                                 &enhancements, sizeof(enhancements)))
-               return false;
-
+       enhancements.response = 0;
+       intel_sdvo_get_value(intel_sdvo,
+                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                            &enhancements, sizeof(enhancements));
        if (enhancements.response == 0) {
                DRM_DEBUG_KMS("No enhancement is supported\n");
                return true;
index c671f60ce80bac917a61c1c60cdb02e692afcb85..4a117e318a73a0a44c7ae4cc0be3447d11a403da 100644 (file)
@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
  * we have a pipe programmed in order to probe the TV.
  */
 static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_display_mode mode;
        struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
 
        if (encoder->crtc && encoder->crtc->enabled) {
                type = intel_tv_detect_type(intel_tv);
-       } else {
+       } else if (force) {
                struct drm_crtc *crtc;
                int dpms_mode;
 
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
                        intel_release_load_detect_pipe(&intel_tv->base, connector,
                                                       dpms_mode);
                } else
-                       type = -1;
-       }
-
-       intel_tv->type = type;
+                       return connector_status_unknown;
+       } else
+               return connector->status;
 
        if (type < 0)
                return connector_status_disconnected;
index a1473fff06ac2d61bd3f629dcc9527be5975f165..fc737037f751c3690dfb09239e3439df1fa4191c 100644 (file)
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 }
 
 static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
 }
 
 static enum drm_connector_status
-nouveau_connector_detect_lvds(struct drm_connector *connector)
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
 
        /* Try retrieving EDID via DDC */
        if (!dev_priv->vbios.fp_no_ddc) {
-               status = nouveau_connector_detect(connector);
+               status = nouveau_connector_detect(connector, force);
                if (status == connector_status_connected)
                        goto out;
        }
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
        if (nv_encoder->dcb->type == OUTPUT_LVDS &&
            (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
             dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
-               nv_connector->native_mode = drm_mode_create(dev);
-               nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+               struct drm_display_mode mode;
+
+               nouveau_bios_fp_mode(dev, &mode);
+               nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
        }
 
        /* Find the native mode if this is a digital panel, if we didn't
index dbd30b2e43fd13b39aaa59eb0abc4fb0332bd1ee..d2047713dc59318fe94aff82eccf98bd28dd6841 100644 (file)
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
        if (nouveau_fb->nvbo) {
                nouveau_bo_unmap(nouveau_fb->nvbo);
+               drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
                drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
                nouveau_fb->nvbo = NULL;
        }
index ead7b8fc53fcbcd473dbdc7a97d893a3e2e9c454..19620a6709f55c00e97efd5d2f816705788420f8 100644 (file)
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                goto out;
 
        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-       drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-       if (ret)
-               drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
 }
 
index 3ec181ff50cea7a45b22a28a6ed43857b2a841cb..3c9964a8fbad00eb4a8371f94900432521f1367f 100644 (file)
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
        mutex_lock(&dev->struct_mutex);
        nouveau_bo_unpin(chan->notifier_bo);
        mutex_unlock(&dev->struct_mutex);
+       drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
        drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
        drm_mm_takedown(&chan->notifier_heap);
 }
index 1bc72c3190a9dcfe9b8f70f16087a231560d9b08..fe359a239df343437cce0b0c79d2692c559ffb9e 100644 (file)
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
 #define SW_I2C_CNTL_WRITE1BIT 6
 
 //==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV                               '01.00'
+#define VESA_OEM_PRODUCT_REV                               "01.00"
 #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT            0xBB       //refer to VBE spec p.32, no TTY support
 #define VESA_MODE_WIN_ATTRIBUTE                                                     7
 #define VESA_WIN_SIZE                                                                                       64
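
The VESA_OEM_PRODUCT_REV change just above swaps a multi-character character constant for a real string: '01.00' is a single (over-long) character constant with an implementation-defined integer value, whereas "01.00" is a six-byte NUL-terminated string literal, which is presumably what a product-revision define is meant to be. Side by side, outside the header:

	int  bad    = '01.00';	/* multi-character constant: an implementation-defined int, and a compiler warning */
	char good[] = "01.00";	/* six bytes including the terminating NUL */
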
index 464a81a1990f6f274d46bd0535283bb315680305..cd0290f946cff51e8aa8c702dda5e650ec5f9af0 100644 (file)
@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        pll->algo = PLL_ALGO_LEGACY;
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
                                }
-                               /* There is some evidence (often anecdotal) that RV515 LVDS
+                               /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
                                 * (on some boards at least) prefers the legacy algo.  I'm not
                                 * sure whether this should handled generically or on a
                                 * case-by-case quirk basis.  Both algos should work fine in the
                                 * majority of cases.
                                 */
                                if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
-                                   (rdev->family == CHIP_RV515)) {
+                                   ((rdev->family == CHIP_RV515) ||
+                                    (rdev->family == CHIP_RV620))) {
                                        /* allow the user to overrride just in case */
                                        if (radeon_new_pll == 1)
                                                pll->algo = PLL_ALGO_NEW;
index b8b7f010b25f8df49e20329932c1735482c03ecf..79082d4398ae156378609bbbbb4e8a9c900124cc 100644 (file)
@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                                                                        EVERGREEN_MAX_BACKENDS_MASK));
                        break;
                }
-       } else
-               gb_backend_map =
-                       evergreen_get_tile_pipe_to_backend_map(rdev,
-                                                              rdev->config.evergreen.max_tile_pipes,
-                                                              rdev->config.evergreen.max_backends,
-                                                              ((EVERGREEN_MAX_BACKENDS_MASK <<
-                                                                rdev->config.evergreen.max_backends) &
-                                                               EVERGREEN_MAX_BACKENDS_MASK));
+       } else {
+               switch (rdev->family) {
+               case CHIP_CYPRESS:
+               case CHIP_HEMLOCK:
+                       gb_backend_map = 0x66442200;
+                       break;
+               case CHIP_JUNIPER:
+                       gb_backend_map = 0x00006420;
+                       break;
+               default:
+                       gb_backend_map =
+                               evergreen_get_tile_pipe_to_backend_map(rdev,
+                                                                      rdev->config.evergreen.max_tile_pipes,
+                                                                      rdev->config.evergreen.max_backends,
+                                                                      ((EVERGREEN_MAX_BACKENDS_MASK <<
+                                                                        rdev->config.evergreen.max_backends) &
+                                                                       EVERGREEN_MAX_BACKENDS_MASK));
+               }
+       }
 
        rdev->config.evergreen.tile_config = gb_addr_config;
        WREG32(GB_BACKEND_MAP, gb_backend_map);
index e817a0bb5eb4a71550d0c9f6f8697cd80c31914e..e151f16a8f86d73090ec6a4eb17a3590661868db 100644 (file)
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
                return false;
        }
        elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
-       if (elapsed >= 3000) {
-               /* very likely the improbable case where current
-                * rptr is equal to last recorded, a while ago, rptr
-                * this is more likely a false positive update tracking
-                * information which should force us to be recall at
-                * latter point
-                */
-               lockup->last_cp_rptr = cp->rptr;
-               lockup->last_jiffies = jiffies;
-               return false;
-       }
-       if (elapsed >= 1000) {
+       if (elapsed >= 10000) {
                dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
                return true;
        }
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
        unsigned long size;
        unsigned prim_walk;
        unsigned nverts;
+       unsigned num_cb = track->num_cb;
 
-       for (i = 0; i < track->num_cb; i++) {
+       if (!track->zb_cb_clear && !track->color_channel_mask &&
+           !track->blend_read_enable)
+               num_cb = 0;
+
+       for (i = 0; i < num_cb; i++) {
                if (track->cb[i].robj == NULL) {
-                       if (!(track->zb_cb_clear || track->color_channel_mask ||
-                             track->blend_read_enable)) {
-                               continue;
-                       }
                        DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
                        return -EINVAL;
                }
index afc18d87fdca7409e4c7462fe3a1b03eeaa6d3ca..7a04959ba0eefacad6ab9894cb1d5732bbf4677e 100644 (file)
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test succeeded in %u usecs\n", i);
        } else {
-               DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+               DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
        /* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
         */
-       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+       if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+           rdev->vram_scratch.ptr) {
                void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
                u32 tmp;
 
index d13622ae74e9799d75045cd282d2797071a23d21..9ceb2a1ce7996c85f36b86f4ddf0fa834b091adf 100644 (file)
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_drm.h"
index fdc3b378cbb0d78542987e111fc1693462dc2b1b..f437d36dd98c2f33d195f4fdaaae034c535be6e8 100644 (file)
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
 
 #ifndef R600_BLIT_SHADERS_H
 #define R600_BLIT_SHADERS_H
index d8864949e387a30d838e59cb5eb6d95a4549b1e3..250a3a918193e9821aa6f5836d463553cf6f903d 100644 (file)
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
        /* using get ib will give us the offset into the mipmap bo */
        word0 = radeon_get_ib_value(p, idx + 3) << 8;
        if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
-               dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
-                       w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
-               return -EINVAL;
+               /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+                 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
        }
        return 0;
 }
index ebae14c4b768b4413990e84c1055782a72590009..68932ba7b8a47d0e7a360be37fb38f16075e4d3b 100644 (file)
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
 
+       /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+       if ((dev->pdev->device == 0x796e) &&
+           (dev->pdev->subsystem_vendor == 0x1462) &&
+           (dev->pdev->subsystem_device == 0x7302)) {
+               if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+                   (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+                       return false;
+       }
+
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
index bd74e428bd147d0df444cc64858f28acbed0d8ed..a04b7a6ad95f3225b1df2879e45e3ced89304c17 100644 (file)
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        /* PowerMac8,1 ? */
                        /* imac g5 isight */
                        rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+               } else if ((rdev->pdev->device == 0x4a48) &&
+                          (rdev->pdev->subsystem_vendor == 0x1002) &&
+                          (rdev->pdev->subsystem_device == 0x4a48)) {
+                       /* Mac X800 */
+                       rdev->mode_info.connector_table = CT_MAC_X800;
                } else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                                            CONNECTOR_OBJECT_ID_VGA,
                                            &hpd);
                break;
+       case CT_MAC_X800:
+               DRM_INFO("Connector Table: %d (mac x800)\n",
+                        rdev->mode_info.connector_table);
+               /* DVI - primary dac, internal tmds */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+               hpd.hpd = RADEON_HPD_1; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP1_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP1_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT1_SUPPORT,
+                                                                 1),
+                                         ATOM_DEVICE_CRT1_SUPPORT);
+               radeon_add_legacy_connector(dev, 0,
+                                           ATOM_DEVICE_DFP1_SUPPORT |
+                                           ATOM_DEVICE_CRT1_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+                                           &hpd);
+               /* DVI - tv dac, dvo */
+               ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+               hpd.hpd = RADEON_HPD_2; /* ??? */
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_DFP2_SUPPORT,
+                                                                 0),
+                                         ATOM_DEVICE_DFP2_SUPPORT);
+               radeon_add_legacy_encoder(dev,
+                                         radeon_get_encoder_enum(dev,
+                                                                 ATOM_DEVICE_CRT2_SUPPORT,
+                                                                 2),
+                                         ATOM_DEVICE_CRT2_SUPPORT);
+               radeon_add_legacy_connector(dev, 1,
+                                           ATOM_DEVICE_DFP2_SUPPORT |
+                                           ATOM_DEVICE_CRT2_SUPPORT,
+                                           DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+                                           CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+                                           &hpd);
+               break;
        default:
                DRM_INFO("Connector table: %d (invalid)\n",
                         rdev->mode_info.connector_table);
index a9dd7847d96ed673e4548efc343e9112390e7c75..ecc1a8fafbfd3eb3c12c0c4d45b4b091a1bee03b 100644 (file)
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_encoder *encoder;
        struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
  * we have to check if this analog encoder is shared with anyone else (TV)
  * if its shared we have to set the other connector to disconnected.
  */
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enum drm_connector_status ret = connector_status_disconnected;
index 6dd434ad2429b9d9689ed861d33a2b1c0b09f092..b92d2f2fcbed6a8bd472ce9f4b936aa82f309278 100644 (file)
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                        DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_DFP5_SUPPORT)
                                        DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+                               if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+                                       DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_TV1_SUPPORT)
                                        DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
                                if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
-       if (radeon_fb->obj)
+       if (radeon_fb->obj) {
                drm_gem_object_unreference_unlocked(radeon_fb->obj);
+       }
        drm_framebuffer_cleanup(fb);
        kfree(radeon_fb);
 }
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                radeon_crtc->rmx_type = radeon_encoder->rmx_type;
                        else
                                radeon_crtc->rmx_type = RMX_OFF;
-                       src_v = crtc->mode.vdisplay;
-                       dst_v = radeon_crtc->native_mode.vdisplay;
-                       src_h = crtc->mode.hdisplay;
-                       dst_h = radeon_crtc->native_mode.vdisplay;
                        /* copy native mode */
                        memcpy(&radeon_crtc->native_mode,
                               &radeon_encoder->native_mode,
                                sizeof(struct drm_display_mode));
+                       src_v = crtc->mode.vdisplay;
+                       dst_v = radeon_crtc->native_mode.vdisplay;
+                       src_h = crtc->mode.hdisplay;
+                       dst_h = radeon_crtc->native_mode.hdisplay;
 
                        /* fix up for overscan on hdmi */
                        if (ASIC_IS_AVIVO(rdev) &&
+                           (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
                            ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
                             ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
                              drm_detect_hdmi_monitor(radeon_connector->edid) &&
index c74a8b20d9413e921bc6a03cd92578146a9ee8ef..9cdf6a35bc2c3f4efbd5daab2a9506078308aa31 100644 (file)
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
        ret = radeon_bo_reserve(rbo, false);
        if (likely(ret == 0)) {
                radeon_bo_kunmap(rbo);
+               radeon_bo_unpin(rbo);
                radeon_bo_unreserve(rbo);
        }
+       drm_gem_object_handle_unreference(gobj);
        drm_gem_object_unreference_unlocked(gobj);
 }
 
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 {
        struct fb_info *info;
        struct radeon_framebuffer *rfb = &rfbdev->rfb;
-       struct radeon_bo *rbo;
-       int r;
 
        if (rfbdev->helper.fbdev) {
                info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
        }
 
        if (rfb->obj) {
-               rbo = rfb->obj->driver_private;
-               r = radeon_bo_reserve(rbo, false);
-               if (likely(r == 0)) {
-                       radeon_bo_kunmap(rbo);
-                       radeon_bo_unpin(rbo);
-                       radeon_bo_unreserve(rbo);
-               }
-               drm_gem_object_unreference_unlocked(rfb->obj);
+               radeonfb_destroy_pinned_object(rfb->obj);
+               rfb->obj = NULL;
        }
        drm_fb_helper_fini(&rfbdev->helper);
        drm_framebuffer_cleanup(&rfb->base);
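
The fbdev teardown now reuses radeonfb_destroy_pinned_object() and clears rfb->obj afterwards, so the kunmap/unpin/unreference sequence lives in one place and the stale pointer cannot be released twice. A small sketch of the destroy-helper-plus-NULL pattern (hypothetical names, not the radeon API):

#include <stdlib.h>

struct buffer { void *data; };

/* One teardown path: every caller releases a buffer the same way. */
static void buffer_destroy(struct buffer *buf)
{
        if (!buf)
                return;
        free(buf->data);
        free(buf);
}

struct framebuffer { struct buffer *obj; };

static void framebuffer_fini(struct framebuffer *fb)
{
        if (fb->obj) {
                buffer_destroy(fb->obj);
                fb->obj = NULL;   /* no dangling pointer for later cleanup paths */
        }
}

int main(void)
{
        struct framebuffer fb = { .obj = malloc(sizeof(struct buffer)) };
        if (fb.obj)
                fb.obj->data = malloc(16);
        framebuffer_fini(&fb);
        framebuffer_fini(&fb);    /* safe: second call sees obj == NULL */
        return 0;
}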
index c578f265b24cefc6dce21734783b01c1aed1ce27..d1e595d9172396b8104d19c7a1d0a87d3b14b772 100644 (file)
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(gobj);
        if (r) {
-               drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
-       drm_gem_object_handle_unreference_unlocked(gobj);
        args->handle = handle;
        return 0;
 }
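
The ioctl now drops the allocation reference unconditionally right after drm_gem_handle_create(): on success the handle holds its own reference, and on failure the object should die anyway, so the separate handle_unreference call disappears. A rough userspace analogue of "creator reference handed over to the handle" (simplified refcounting, not the DRM API):

#include <stdio.h>
#include <stdlib.h>

struct object { int refcount; };

static struct object *object_alloc(void)            /* returns with one ref */
{
        struct object *obj = calloc(1, sizeof(*obj));
        if (obj)
                obj->refcount = 1;
        return obj;
}

static void object_get(struct object *obj) { obj->refcount++; }

static void object_put(struct object *obj)
{
        if (--obj->refcount == 0)
                free(obj);
}

/* A handle table entry takes its own reference. */
static int handle_create(struct object *obj, int *handle)
{
        object_get(obj);
        *handle = 42;                                /* pretend table index */
        return 0;
}

int main(void)
{
        int handle;
        struct object *obj = object_alloc();
        int r = handle_create(obj, &handle);

        /* Drop the allocation reference - the handle holds its own now.   */
        object_put(obj);
        if (r)
                return r;                            /* object already gone */

        printf("handle %d, refcount %d\n", handle, obj->refcount);
        object_put(obj);                             /* handle released     */
        return 0;
}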
index 5eee3c41d124bf49fbd5dfbc7264fb062699e961..8fbbe1c6ebbda854f7bf9dc9f76ae1c4eefdafc5 100644 (file)
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (rdev->powered_down)
+               return -EINVAL;
        return 0;
 }
 
index efbe975312dc42342c2add89c417b753b0e92791..17a6602b5885786fecd6dd3231940a7b5d3c4261 100644 (file)
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
 
 /* mostly for macs, but really any system without connector tables */
 enum radeon_connector_table {
-       CT_NONE,
+       CT_NONE = 0,
        CT_GENERIC,
        CT_IBOOK,
        CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
        CT_IMAC_G5_ISIGHT,
        CT_EMAC,
        CT_RN50_POWER,
+       CT_MAC_X800,
 };
 
 enum radeon_dvo_chip {
index 7cffb3e0423249ec4f78f7c7cbdc72b6df921e50..3451a82adba76c31672ee96f086146f5da1ab12b 100644 (file)
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        fbo->vm_node = NULL;
+       atomic_set(&fbo->cpu_writers, 0);
 
        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        kref_init(&fbo->list_kref);
index ca904799f018a6e3ae23c80933bd1e007aec8f7f..b1e02fffd3ccdebf256d38bb55bed9a37ea1c8d7 100644 (file)
@@ -69,7 +69,7 @@ struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
-       int                     gfp_flags;
+       gfp_t                   gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * This function is reentrant if caller updates count depending on number of
  * pages returned in pages array.
  */
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
 {
        struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
-       int gfp_flags = GFP_USER;
+       gfp_t gfp_flags = GFP_USER;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
        return 0;
 }
 
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
 {
        int i;
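
Two small C hygiene fixes in this file: gfp_flags becomes gfp_t instead of plain int so sparse can type-check allocation flags, and ttm_page_alloc_fini() gains an explicit void parameter list, since in C an empty () declares an old-style function that accepts any arguments unchecked. A tiny illustration of the prototype difference (names made up):

#include <stdio.h>

int legacy_fini();        /* old-style: callers may pass anything, unchecked */
int strict_fini(void);    /* prototype: the compiler rejects any argument    */

int legacy_fini() { return 0; }
int strict_fini(void) { return 0; }

int main(void)
{
        /* legacy_fini(1, 2, 3);  still compiles under C89/C99 rules         */
        /* strict_fini(1);        error: too many arguments to function      */
        printf("%d %d\n", legacy_fini(), strict_fini());
        return 0;
}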
 
index 72ec2e2b6e9787196ca1de65f28e4c6a0f090051..a96ed6d9d010b82cfc58ed41ec6240f99d5a9103 100644 (file)
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
        {0, 0, 0}
 };
 
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);
 
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
 static void vmw_print_capabilities(uint32_t capabilities)
 {
        DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 {
        int ret;
 
-       vmw_kms_save_vga(dev_priv);
-
        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
-       vmw_kms_restore_vga(dev_priv);
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+       int ret = 0;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+               ret = vmw_request_device(dev_priv);
+               if (unlikely(ret != 0))
+                       --dev_priv->num_3d_resources;
+       }
+       mutex_unlock(&dev_priv->release_mutex);
+       return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+       int32_t n3d;
+
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(--dev_priv->num_3d_resources == 0))
+               vmw_release_device(dev_priv);
+       n3d = (int32_t) dev_priv->num_3d_resources;
+       mutex_unlock(&dev_priv->release_mutex);
+
+       BUG_ON(n3d < 0);
+}
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->last_read_sequence = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->enable_fb = enable_fbdev;
+
        mutex_lock(&dev_priv->hw_mutex);
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev->dev_private = dev_priv;
 
-       if (!dev->devname)
-               dev->devname = vmw_devname;
-
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-               ret = drm_irq_install(dev);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed installing irq: %d\n", ret);
-                       goto out_no_irq;
-               }
-       }
-
        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        goto out_no_device;
                }
        }
-       ret = vmw_request_device(dev_priv);
+       ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
-               goto out_no_device;
-       vmw_kms_init(dev_priv);
+               goto out_no_kms;
        vmw_overlay_init(dev_priv);
-       vmw_fb_init(dev_priv);
+       if (dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       goto out_no_fifo;
+               vmw_kms_save_vga(dev_priv);
+               vmw_fb_init(dev_priv);
+               DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+                        "Detected device 3D availability.\n" :
+                        "Detected no device 3D availability.\n");
+       } else {
+               DRM_INFO("Delayed 3D detection since we're not "
+                        "running the device in SVGA mode yet.\n");
+       }
+
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+               ret = drm_irq_install(dev);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed installing irq: %d\n", ret);
+                       goto out_no_irq;
+               }
+       }
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
 
-       DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
        return 0;
 
-out_no_device:
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
 out_no_irq:
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+out_no_fifo:
+       vmw_overlay_close(dev_priv);
+       vmw_kms_close(dev_priv);
+out_no_kms:
+       if (dev_priv->stealth)
+               pci_release_region(dev->pdev, 2);
+       else
+               pci_release_regions(dev->pdev);
+out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
        iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
-       vmw_fb_close(dev_priv);
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
-       vmw_release_device(dev_priv);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
 
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-               if (unlikely(ioctl->cmd != cmd)) {
+               if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
 
+       if (!dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       return ret;
+               vmw_kms_save_vga(dev_priv);
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+               mutex_unlock(&dev_priv->hw_mutex);
+       }
+
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
        return 0;
 
 out_no_active_lock:
-       vmw_release_device(dev_priv);
+       if (!dev_priv->enable_fb) {
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        return ret;
 }
 
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
 
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
+       if (!dev_priv->enable_fb) {
+               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+               if (unlikely(ret != 0))
+                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-       vmw_fb_on(dev_priv);
+       if (dev_priv->enable_fb)
+               vmw_fb_on(dev_priv);
 }
 
 
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
+       .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
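
vmw_3d_resource_inc/dec wrap the old vmw_request_device/vmw_release_device pair in a counter guarded by the new release_mutex, so the SVGA device is started by the first user (0 -> 1) and stopped when the last one goes away (1 -> 0); fbdev, contexts and surfaces all share it. A stripped-down pthread sketch of the same first-user-starts / last-user-stops counter (illustrative names only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_3d_resources;

static int request_device(void)  { printf("device started\n"); return 0; }
static void release_device(void) { printf("device stopped\n"); }

static int resource_inc(void)
{
        int ret = 0;

        pthread_mutex_lock(&release_mutex);
        if (num_3d_resources++ == 0) {
                ret = request_device();
                if (ret != 0)
                        --num_3d_resources;     /* undo on failure */
        }
        pthread_mutex_unlock(&release_mutex);
        return ret;
}

static void resource_dec(void)
{
        pthread_mutex_lock(&release_mutex);
        if (--num_3d_resources == 0)
                release_device();
        pthread_mutex_unlock(&release_mutex);
}

int main(void)
{
        resource_inc();         /* first user: starts the device  */
        resource_inc();         /* second user: counter only      */
        resource_dec();
        resource_dec();         /* last user: stops the device    */
        return 0;
}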
index 429f917b60bf4b30ecdfd0946f9f1f0978bd5c58..58de6393f611dd79fbbebeab81102dcf0f4abfcb 100644 (file)
@@ -277,6 +277,7 @@ struct vmw_private {
 
        bool stealth;
        bool is_opened;
+       bool enable_fb;
 
        /**
         * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
+
+       struct mutex release_mutex;
+       uint32_t num_3d_resources;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
        return val;
 }
 
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
  * Overlay control - vmwgfx_overlay.c
index 870967a97c15d52eb3f380323e6038d32ed6e76f..409e172f4abfe94502be96502e251b5d6b2e54c9 100644 (file)
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
        if (unlikely(ret != 0))
                goto err_unlock;
 
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.mm_node->start < bo->num_pages)
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+                                      false, false);
+
        ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 
        /* Could probably bug on */
index e6a1eb7ea95498f00e65d123adeae79160af8fa8..0fe31766e4cf5f11e6025e5a85f96baa5936408f 100644 (file)
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 
        min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
+       vmw_write(dev_priv, SVGA_REG_TRACES,
+                 dev_priv->traces_state);
 
        mutex_unlock(&dev_priv->hw_mutex);
        vmw_fence_queue_takedown(&fifo->fence_queue);
index 64d7f47df8683ef49cfbb3c83eb449026949af03..e882ba099f0c33dab30f12f7b9328b3b628ac712 100644 (file)
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
                save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
                save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+               if (i == 0 && vmw_priv->num_displays == 1 &&
+                   save->width == 0 && save->height == 0) {
+
+                       /*
+                        * It should be fairly safe to assume that these
+                        * values are uninitialized.
+                        */
+
+                       save->width = vmw_priv->vga_width - save->pos_x;
+                       save->height = vmw_priv->vga_height - save->pos_y;
+               }
        }
+
        return 0;
 }
 
@@ -984,3 +996,8 @@ out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
 }
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+       return 0;
+}
index 2ff5cf78235f35379cf409b3ec659774124a0757..11cb39e3accbfa9581801095ab0398952d2313f1 100644 (file)
@@ -27,6 +27,8 @@
 
 #include "vmwgfx_kms.h"
 
+#define VMWGFX_LDU_NUM_DU 8
+
 #define vmw_crtc_to_ldu(x) \
        container_of(x, struct vmw_legacy_display_unit, base.crtc)
 #define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
 }
 
 static enum drm_connector_status
-       vmw_ldu_connector_detect(struct drm_connector *connector)
+       vmw_ldu_connector_detect(struct drm_connector *connector,
+                                bool force)
 {
        if (vmw_connector_to_ldu(connector)->pref_active)
                return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
-       connector->status = vmw_ldu_connector_detect(connector);
+       connector->status = vmw_ldu_connector_detect(connector, true);
 
        drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+       int i;
+       int ret;
+
        if (dev_priv->ldu_priv) {
                DRM_INFO("ldu system already on\n");
                return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
        drm_mode_create_dirty_info_property(dev_priv->dev);
 
-       vmw_ldu_init(dev_priv, 0);
-       /* for old hardware without multimon only enable one display */
        if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-               vmw_ldu_init(dev_priv, 1);
-               vmw_ldu_init(dev_priv, 2);
-               vmw_ldu_init(dev_priv, 3);
-               vmw_ldu_init(dev_priv, 4);
-               vmw_ldu_init(dev_priv, 5);
-               vmw_ldu_init(dev_priv, 6);
-               vmw_ldu_init(dev_priv, 7);
+               for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+                       vmw_ldu_init(dev_priv, i);
+               ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+       } else {
+               /* for old hardware without multimon only enable one display */
+               vmw_ldu_init(dev_priv, 0);
+               ret = drm_vblank_init(dev, 1);
        }
 
-       return 0;
+       return ret;
 }
 
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       drm_vblank_cleanup(dev);
        if (!dev_priv->ldu_priv)
                return -ENOSYS;
 
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
                        ldu->pref_height = 600;
                        ldu->pref_active = false;
                }
-               con->status = vmw_ldu_connector_detect(con);
+               con->status = vmw_ldu_connector_detect(con, true);
        }
 
        mutex_unlock(&dev->mode_config.mutex);
index 5f2d5df01e5c370acbc1be99772701626419daf5..c8c40e9979dbd21442a5d87cf6ac4cdcad08cfad 100644 (file)
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
        cmd->body.cid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
 }
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
        cmd->body.sid = cpu_to_le32(res->id);
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv);
 }
 
 void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
        }
 
        vmw_fifo_commit(dev_priv, submit_size);
+       (void) vmw_3d_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
 }
index b87569e96b163c04fb35790ef8c457999480e3f3..f366f968155a3ed913ce770a60ca30cbf2f97981 100644 (file)
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
        pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
 }
 
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
 {
        struct vga_device *vgadev;
        unsigned long flags;
index 0c52899be9643d85af5c7ac6be8c87bc67d6e6ce..3f7292486024b8feace0b72775c03a2ae122e8fc 100644 (file)
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
        { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
index 85c6d13c9ffa9369fca613eda828133b2b69a3e2..765a4f53eb5cb663fd319d0a71386ae1a9ee0fa5 100644 (file)
 
 #define USB_VENDOR_ID_ASUS             0x0486
 #define USB_DEVICE_ID_ASUS_T91MT       0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO   0x0186
 
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 
 #define USB_VENDOR_ID_BTC              0x046e
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE        0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2      0x5577
 
 #define USB_VENDOR_ID_CANDO            0x2087
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH        0x0a01
 
 #define USB_VENDOR_ID_CHICONY          0x04f2
 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD     0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 
 #define USB_VENDOR_ID_CIDC             0x1677
 
 #define USB_VENDOR_ID_UCLOGIC          0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209    0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U   0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5      0x6001
 
 #define USB_VENDOR_ID_VERNIER          0x08f7
 #define USB_DEVICE_ID_VERNIER_LABPRO   0x0001
index e91437c189061cf7c862a74b3b6054f369528af7..ac5421d568f151cd6937f33244740d9f9759fedb 100644 (file)
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
 
 static const struct hid_device_id mosart_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);
index 5771f851f85693a46bd581085e25e09775fbcae5..956ed9ac19d4dee7fc3b259b1453662ec0bd8146 100644 (file)
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 static const struct hid_device_id ts_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
        { }
 };
index b729c02866798c00ae4d2c77eadba097a79d7f73..599041a7f670a9f105e00da3272d79642aefb78c 100644 (file)
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
                }
        } else {
                int skipped_report_id = 0;
+               int report_id = buf[0];
                if (buf[0] == 0x0) {
                        /* Don't send the Report ID */
                        buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
                ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                        HID_REQ_SET_REPORT,
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       ((report_type + 1) << 8) | *buf,
+                       ((report_type + 1) << 8) | report_id,
                        interface->desc.bInterfaceNumber, buf, count,
                        USB_CTRL_SET_TIMEOUT);
                /* count also the report id, if this was a numbered report. */
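
The fix caches buf[0] in report_id before the buffer pointer is (possibly) advanced past the report ID, so the wValue of the SET_REPORT control request no longer picks up the first data byte when the report ID is zero. The general rule, sketched with made-up names: capture anything derived from a pointer before you move the pointer.

#include <stdio.h>

static void send_report(const unsigned char *buf, int count)
{
        int report_id = buf[0];          /* capture before touching buf      */

        if (buf[0] == 0x0) {             /* unnumbered report: strip the ID  */
                buf++;
                count--;
        }

        /* The request value still uses the original report ID (here 0),    */
        /* not whatever data byte buf now happens to point at.              */
        printf("wValue low byte = %d, first payload byte = %d\n",
               report_id, buf[0]);
        (void)count;
}

int main(void)
{
        unsigned char unnumbered[] = { 0x00, 0xAB, 0xCD };
        send_report(unnumbered, sizeof(unnumbered));
        return 0;
}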
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
        { }
 };
 
+struct usb_interface *usbhid_find_interface(int minor)
+{
+       return usb_find_interface(&hid_driver, minor);
+}
+
 static struct hid_driver hid_usb_driver = {
        .name = "generic-usb",
        .id_table = hid_usb_table,
index 2643d31476213cd41f5d1b39042e83d5a70a63a4..70da3181c8a0467663c15abd324dd6fbe801ea51 100644 (file)
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+       { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
 
+       { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
        { 0, 0 }
 };
 
index 0a29c51114aaf0d36c64f6c8c25195db5e2f3747..681e620eb95b166ed43ac48c082960ba0f39e211 100644 (file)
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
        struct hiddev *hiddev;
        int res;
 
-       intf = usb_find_interface(&hiddev_driver, iminor(inode));
+       intf = usbhid_find_interface(iminor(inode));
        if (!intf)
                return -ENODEV;
        hid = usb_get_intfdata(intf);
index 693fd3e720df41c8ded8ccf15f9575648de77c96..89d2e847dcc671b65f15484b3df34f6c4a6c288d 100644 (file)
@@ -42,6 +42,7 @@ void usbhid_submit_report
 (struct hid_device *hid, struct hid_report *report, unsigned char dir);
 int usbhid_get_power(struct hid_device *hid);
 void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
 
 /* iofl flags */
 #define HID_CTRL_RUNNING       1
index 4d4d09bdec0a7a7cb043725b2fb94dc1fff23dc7..97499d00615aacbddcf5963bcfa08b5f4733a1bb 100644 (file)
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
 
 config SENSORS_PKGTEMP
        tristate "Intel processor package temperature sensor"
-       depends on X86 && PCI && EXPERIMENTAL
+       depends on X86 && EXPERIMENTAL
        help
          If you say yes here you get support for the package level temperature
          sensor inside your CPU. Check documentation/driver for details.
index 15c1a9616af33ba13ae8a79823cb7d30729c42fe..0683e6be662cfe28e803cac2b6ddd6501f06ef29 100644 (file)
@@ -79,7 +79,7 @@ struct adm1031_data {
        int chip_type;
        char valid;             /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
-       unsigned int update_rate;       /* In milliseconds */
+       unsigned int update_interval;   /* In milliseconds */
        /* The chan_select_table contains the possible configurations for
         * auto fan control.
         */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
 static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
 
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,
 };
 
-static ssize_t show_update_rate(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "%u\n", data->update_rate);
+       return sprintf(buf, "%u\n", data->update_interval);
 }
 
-static ssize_t set_update_rate(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
        if (err)
                return err;
 
-       /* find the nearest update rate from the table */
-       for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
-               if (val >= update_rates[i])
+       /*
+        * Find the nearest update interval from the table.
+        * Use it to determine the matching update rate.
+        */
+       for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+               if (val >= update_intervals[i])
                        break;
        }
-       /* if not found, we point to the last entry (lowest update rate) */
+       /* if not found, we point to the last entry (lowest update interval) */
 
        /* set the new update rate while preserving other settings */
        reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
        adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
 
        mutex_lock(&data->update_lock);
-       data->update_rate = update_rates[i];
+       data->update_interval = update_intervals[i];
        mutex_unlock(&data->update_lock);
 
        return count;
 }
 
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
-                  set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+                  set_update_interval);
 
 static struct attribute *adm1031_attributes[] = {
        &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
 
        &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
 
-       &dev_attr_update_rate.attr,
+       &dev_attr_update_interval.attr,
        &dev_attr_alarms.attr,
 
        NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
        mask = ADM1031_UPDATE_RATE_MASK;
        read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
        i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
-       data->update_rate = update_rates[i];
+       /* Save it as update interval */
+       data->update_interval = update_intervals[i];
 }
 
 static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
 
        mutex_lock(&data->update_lock);
 
-       next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+       next_update = data->last_updated
+         + msecs_to_jiffies(data->update_interval);
        if (time_after(jiffies, next_update) || !data->valid) {
 
                dev_dbg(&client->dev, "Starting adm1031 update\n");
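
The driver renames the sysfs attribute to update_interval (milliseconds) but still programs the hardware through the same 3-bit rate field: the register value is masked, shifted, and used as an index into update_intervals[]. A minimal standalone version of that mask/shift/table-lookup step (register layout modeled loosely on the hunk, constants hypothetical):

#include <stdio.h>

#define UPDATE_RATE_SHIFT 5
#define UPDATE_RATE_MASK  (0x7 << UPDATE_RATE_SHIFT)

static const unsigned int update_intervals[] = {
        16000, 8000, 4000, 2000, 1000, 500, 250, 125,   /* milliseconds */
};

int main(void)
{
        unsigned int fan_filter_reg = 0x45;     /* pretend register readback */
        unsigned int i;

        /* Extract the 3-bit rate field and map it to an interval in ms.    */
        i = (fan_filter_reg & UPDATE_RATE_MASK) >> UPDATE_RATE_SHIFT;
        printf("rate index %u -> update interval %u ms\n",
               i, update_intervals[i]);
        return 0;
}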
index de8111114f469ec21567a5781349bb0d7f7cbc98..a23b17a78ace8f42cb114b30f593b9019a24a982 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
+#include <asm/smp.h>
 
 #define DRVNAME        "coretemp"
 
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       /*
+        * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+        * sensors. We check this bit only, all the early CPUs
+        * without thermal sensors will be filtered out.
+        */
+       if (!cpu_has(c, X86_FEATURE_DTS)) {
+               printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+                      " has no thermal sensor.\n", c->x86_model);
+               return 0;
+       }
 
        mutex_lock(&pdev_list_mutex);
 
@@ -482,14 +492,22 @@ exit:
 
 static void coretemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
+       unsigned int i;
+
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
-               if (p->cpu == cpu) {
-                       platform_device_unregister(p->pdev);
-                       list_del(&p->list);
-                       kfree(p);
-               }
+       list_for_each_entry(p, &pdev_list, list) {
+               if (p->cpu != cpu)
+                       continue;
+
+               platform_device_unregister(p->pdev);
+               list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
+               kfree(p);
+               for_each_cpu(i, cpu_sibling_mask(cpu))
+                       if (i != cpu && !coretemp_device_add(i))
+                               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
        if (err)
                goto exit;
 
-       for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-               /*
-                * CPUID.06H.EAX[0] indicates whether the CPU has thermal
-                * sensors. We check this bit only, all the early CPUs
-                * without thermal sensors will be filtered out.
-                */
-               if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
-                       coretemp_device_add(i);
-               else {
-                       printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
-                               " has no thermal sensor.\n", c->x86_model);
-               }
-       }
+       for_each_online_cpu(i)
+               coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
                err = -ENODEV;
                goto exit_driver_unreg;
        }
+#endif
 
        register_hotcpu_notifier(&coretemp_cpu_notifier);
        return 0;
 
-exit_driver_unreg:
 #ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
        platform_driver_unregister(&coretemp_driver);
 #endif
 exit:
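
The thermal-sensor check (CPUID leaf 06H, EAX bit 0, the DTS feature) moves from coretemp_init() into coretemp_device_add(), so hot-plugged CPUs get filtered the same way as boot CPUs. For reference, the same bit can be read from userspace with the compiler's cpuid helper; this is only a probe of the feature flag, not the driver logic:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if leaf 6 is not supported at all. */
        if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx)) {
                printf("CPUID leaf 06H not available\n");
                return 1;
        }

        /* CPUID.06H:EAX[0] - digital thermal sensor (DTS) present?   */
        printf("digital thermal sensor: %s\n",
               (eax & 1) ? "present" : "absent");
        return 0;
}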
index 5b58b20dead1fa92b79af5c05036d45de8bf88ca..8dee3f38fdfb27e1c80a9fb36add60e414aeb805 100644 (file)
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
        res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
        if (res) {
                dev_warn(&client->dev, "create group failed\n");
-               hwmon_device_unregister(data->hwmon_dev);
                goto thermal_error1;
        }
        data->hwmon_dev = hwmon_device_register(&client->dev);
index 537841ef44b99d179318f7510dbf28dddedb0ed8..75afb3b0e0763c184a1b22cdc163ef32d10d2969 100644 (file)
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
 /* Super-I/O Function prototypes */
 static inline int superio_inb(int base, int reg);
 static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
 static inline void superio_select(int base, int ld);
 static inline void superio_exit(int base);
 
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
        return val;
 }
 
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
 {
+       /* Don't step on other drivers' I/O space by accident */
+       if (!request_muxed_region(base, 2, DRVNAME)) {
+               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+                               base);
+               return -EBUSY;
+       }
+
        /* according to the datasheet the key must be send twice! */
        outb(SIO_UNLOCK_KEY, base);
        outb(SIO_UNLOCK_KEY, base);
+
+       return 0;
 }
 
 static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
 static inline void superio_exit(int base)
 {
        outb(SIO_LOCK_KEY, base);
+       release_region(base, 2);
 }
 
 static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
 static int __init f71882fg_find(int sioaddr, unsigned short *address,
        struct f71882fg_sio_data *sio_data)
 {
-       int err = -ENODEV;
        u16 devid;
-
-       /* Don't step on other drivers' I/O space by accident */
-       if (!request_region(sioaddr, 2, DRVNAME)) {
-               printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
-                               (int)sioaddr);
-               return -EBUSY;
-       }
-
-       superio_enter(sioaddr);
+       int err = superio_enter(sioaddr);
+       if (err)
+               return err;
 
        devid = superio_inw(sioaddr, SIO_REG_MANID);
        if (devid != SIO_FINTEK_ID) {
                pr_debug(DRVNAME ": Not a Fintek device\n");
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        default:
                printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
                       (unsigned int)devid);
+               err = -ENODEV;
                goto exit;
        }
 
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
 
        if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
                printk(KERN_WARNING DRVNAME ": Device not activated\n");
+               err = -ENODEV;
                goto exit;
        }
 
        *address = superio_inw(sioaddr, SIO_REG_ADDR);
        if (*address == 0) {
                printk(KERN_WARNING DRVNAME ": Base address not set\n");
+               err = -ENODEV;
                goto exit;
        }
        *address &= ~(REGION_LENGTH - 1);       /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
                (int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
        superio_exit(sioaddr);
-       release_region(sioaddr, 2);
        return err;
 }
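
superio_enter() now claims the Super-I/O port range itself (request_muxed_region) and reports failure, and every early exit in f71882fg_find() sets an explicit error code before jumping to the common superio_exit() label. A compact sketch of that acquire-or-fail / single-exit shape (stand-in functions, not the kernel API):

#include <stdio.h>
#include <errno.h>

static int region_busy;                  /* pretend another driver owns it */

static int superio_enter(int base)
{
        if (region_busy)
                return -EBUSY;           /* caller must not touch the ports */
        printf("unlocked config space at 0x%x\n", base);
        return 0;
}

static void superio_exit(int base)
{
        printf("locked config space at 0x%x and released region\n", base);
}

static int chip_find(int base)
{
        int err = superio_enter(base);
        unsigned int devid = 0x0541;     /* pretend register readback       */

        if (err)
                return err;

        if (devid != 0x0541) {           /* not the chip we drive           */
                err = -ENODEV;
                goto exit;
        }
exit:
        superio_exit(base);              /* single unlock/release point     */
        return err;
}

int main(void)
{
        printf("chip_find -> %d\n", chip_find(0x2e));
        return 0;
}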
 
index 0f58ecc5334d941cb4114a3681e2e5a44893280e..9638d58f99fdb0e56f15832f824dc6096d99d3ea 100644 (file)
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
 #define F75375_REG_PWM2_DROP_DUTY      0x6C
 
 #define FAN_CTRL_LINEAR(nr)            (4 + nr)
-#define FAN_CTRL_MODE(nr)              (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr)              (4 + ((nr) * 2))
 
 /*
  * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
                return -EINVAL;
 
        fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
-       fanmode = ~(3 << FAN_CTRL_MODE(nr));
+       fanmode &= ~(3 << FAN_CTRL_MODE(nr));
 
        switch (val) {
        case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        conf = f75375_read8(client, F75375_REG_CONFIG1);
-       conf = ~(1 << FAN_CTRL_LINEAR(nr));
+       conf &= ~(1 << FAN_CTRL_LINEAR(nr));
 
        if (val == 0)
                conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
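
Both hunks fix the same class of bug: `fanmode = ~(3 << ...)` and `conf = ~(1 << ...)` threw away every other bit in the register instead of clearing just the field, while `&=` performs the intended read-modify-write. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int reg = 0xA5;        /* pretend register readback        */
        unsigned int wrong = reg, right = reg;

        wrong = ~(3u << 4);             /* overwrites reg: 0xFFFFFFCF       */
        right &= ~(3u << 4);            /* clears only bits 5:4 -> 0x85     */

        printf("wrong = 0x%X, right = 0x%X\n", wrong, right);
        return 0;
}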
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index 6138f036b159956dbc4eec8282636db794485527..fc591ae53107da8481a2ab5e02f53117ac6e2471 100644 (file)
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
        wake_up_interruptible(&lis3_dev.misc_wait);
        kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
 out:
-       if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+       if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
            lis3_dev.idev->input->users)
                return IRQ_WAKE_THREAD;
        return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
         * io-apic is not configurable (and generates a warning) but I keep it
         * in case of support for other hardware.
         */
-       if (dev->whoami == WAI_8B)
+       if (dev->pdata && dev->whoami == WAI_8B)
                thread_fn = lis302dl_interrupt_thread1_8b;
        else
                thread_fn = NULL;
index dc1f5402c1d7ddcdaf4396fe177a689550b9ac2b..8e5933b72d1956c2a931ea7176b1c589e3066968 100644 (file)
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
 {
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweroff(lis3);
        return 0;
 }
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
 {
        struct lis3lv02d *lis3 = i2c_get_clientdata(client);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweron(lis3);
        return 0;
 }
index 82b16808a274c1e0045692fcfc32dcfdb641ec31..b9be5e3a22b3825bc640d9c038c8c724e888c5b4 100644 (file)
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
 {
        struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweroff(&lis3_dev);
 
        return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
 {
        struct lis3lv02d *lis3 = spi_get_drvdata(spi);
 
-       if (!lis3->pdata->wakeup_flags)
+       if (!lis3->pdata || !lis3->pdata->wakeup_flags)
                lis3lv02d_poweron(lis3);
 
        return 0;
index 94741d42112da02ca902b2e87beacbe96aab3cd3..464340f25496402dd15a69e2d59281544e267c37 100644 (file)
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
 struct lm95241_data {
        struct device *hwmon_dev;
        struct mutex update_lock;
-       unsigned long last_updated, rate; /* in jiffies */
+       unsigned long last_updated, interval; /* in jiffies */
        char valid; /* zero until following fields are valid */
        /* registers values */
        u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
 show_temp(remote1);
 show_temp(remote2);
 
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct lm95241_data *data = lm95241_update_device(dev);
 
-       snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+       snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
        return strlen(buf);
 }
 
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm95241_data *data = i2c_get_clientdata(client);
 
-       strict_strtol(buf, 10, &data->rate);
-       data->rate = data->rate * HZ / 1000;
+       strict_strtol(buf, 10, &data->interval);
+       data->interval = data->interval * HZ / 1000;
 
        return count;
 }
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
 static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
 static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
 static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+                  set_interval);
 
 static struct attribute *lm95241_attributes[] = {
        &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
        &dev_attr_temp3_min.attr,
        &dev_attr_temp2_max.attr,
        &dev_attr_temp3_max.attr,
-       &dev_attr_rate.attr,
+       &dev_attr_update_interval.attr,
        NULL
 };
 
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
 {
        struct lm95241_data *data = i2c_get_clientdata(client);
 
-       data->rate = HZ;    /* 1 sec default */
+       data->interval = HZ;    /* 1 sec default */
        data->valid = 0;
        data->config = CFG_CR0076;
        data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
 
        mutex_lock(&data->update_lock);
 
-       if (time_after(jiffies, data->last_updated + data->rate) ||
+       if (time_after(jiffies, data->last_updated + data->interval) ||
            !data->valid) {
                dev_dbg(&client->dev, "Updating lm95241 data.\n");
                data->local_h =
index 74157fcda6edf4bc569469db4ae1de9efa5b882d..f11903936c8b3a51c3f8dddcb20318185952fa79 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/cpu.h>
-#include <linux/pci.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
 
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
        err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
        if (err)
-               goto exit_free;
+               goto exit_dev;
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
 
 exit_class:
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
 exit_free:
        kfree(data);
 exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
 
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+       device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
        platform_set_drvdata(pdev, NULL);
        kfree(data);
        return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+       if (!cpu_has(c, X86_FEATURE_PTS))
+               return 0;
 
        mutex_lock(&pdev_list_mutex);
 
@@ -339,17 +342,18 @@ exit:
 #ifdef CONFIG_HOTPLUG_CPU
 static void pkgtemp_device_remove(unsigned int cpu)
 {
-       struct pdev_entry *p, *n;
+       struct pdev_entry *p;
        unsigned int i;
        int err;
 
        mutex_lock(&pdev_list_mutex);
-       list_for_each_entry_safe(p, n, &pdev_list, list) {
+       list_for_each_entry(p, &pdev_list, list) {
                if (p->cpu != cpu)
                        continue;
 
                platform_device_unregister(p->pdev);
                list_del(&p->list);
+               mutex_unlock(&pdev_list_mutex);
                kfree(p);
                for_each_cpu(i, cpu_core_mask(cpu)) {
                        if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
                                        break;
                        }
                }
-               break;
+               return;
        }
        mutex_unlock(&pdev_list_mutex);
 }
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
                goto exit;
 
        for_each_online_cpu(i) {
-               struct cpuinfo_x86 *c = &cpu_data(i);
-
-               if (!cpu_has(c, X86_FEATURE_PTS))
-                       continue;
-
                err = pkgtemp_device_add(i);
                if (err)
                        goto exit_devices_unreg;
index e96e69dd36fb4b4faba43ffad13b53815d06bc65..072c58008a633b713e1f68fd3a522572a41842d1 100644 (file)
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
 static inline void
 superio_exit(int ioreg)
 {
+       outb(0xaa, ioreg);
        outb(0x02, ioreg);
        outb(0x02, ioreg + 1);
 }
index 2222c87876b97bc711b6d739fd4a82deef7db330..b8feac5f2ef4f6f4be4d410ec7a5a2ce2a6d4946 100644 (file)
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
 
        dev->terminate = 0;
 
-       /* write the data into mode register */
-       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
-
        /*
         * First byte should be set here, not after interrupt,
         * because transmit-data-ready interrupt can come before
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
                dev->buf_len--;
        }
 
+       /* write the data into mode register; start transmitting */
+       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+
        r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
                                                      dev->adapter.timeout);
        if (r == 0) {
index 0e9f85d0a835718dac97ecd52ff270f327d84136..56dbe54e88118a3fb7b112da16e11ccd5bdbc9fb 100644 (file)
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
                return result;
        } else if (result == 0) {
                dev_dbg(i2c->dev, "%s: timeout\n", __func__);
-               result = -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        return 0;
index 7674efb553786e4e2a639c5a5b263362564b626b..b33c78586bfccf815d9322df1d561b7bec5797b5 100644 (file)
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
        if (r == 0)
                r = num;
+
+       omap_i2c_wait_for_bb(dev);
 out:
        omap_i2c_idle(dev);
        return r;
index 72902e0bbfa79a48caaf2193420d4b8712af1e18..bf831bf8158741a9f857eb541afc1f3a48d38e52 100644 (file)
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
                unsigned long sda_delay;
 
                if (pdata->sda_delay) {
-                       sda_delay = (freq / 1000) * pdata->sda_delay;
-                       sda_delay /= 1000000;
+                       sda_delay = clkin * pdata->sda_delay;
+                       sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
                        sda_delay = DIV_ROUND_UP(sda_delay, 5);
                        if (sda_delay > 3)
                                sda_delay = 3;
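
The SDA delay is now derived from the actual I2C input clock (clkin) rather than the requested bus frequency, and both divisions round up so the programmed delay is never shorter than the platform asked for, clamped to the 2-bit register field. A standalone rendering of that arithmetic (DIV_ROUND_UP as in the kernel; the example numbers and the assumption that clkin is in kHz are mine):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long clkin = 66000;            /* I2C input clock, kHz      */
        unsigned long pdata_sda_delay = 100;    /* requested delay, ns       */
        unsigned long sda_delay;

        sda_delay = clkin * pdata_sda_delay;          /* kHz * ns            */
        sda_delay = DIV_ROUND_UP(sda_delay, 1000000); /* -> clock cycles     */
        sda_delay = DIV_ROUND_UP(sda_delay, 5);       /* field counts 5-cycle units */
        if (sda_delay > 3)
                sda_delay = 3;                        /* 2-bit register field */

        printf("programmed SDA delay field: %lu\n", sda_delay);
        return 0;
}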
index 4c3d1bfec0c5b450fbbe68014e1c15fe9e29c454..068cef0a987aa672566d986eb7353670d533fdd6 100644 (file)
@@ -1444,14 +1444,6 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                        ide_acpi_port_init_devices(hwif);
        }
 
-       ide_host_for_each_port(i, hwif, host) {
-               if (hwif == NULL)
-                       continue;
-
-               if (hwif->present)
-                       hwif_register_devices(hwif);
-       }
-
        ide_host_for_each_port(i, hwif, host) {
                if (hwif == NULL)
                        continue;
@@ -1459,8 +1451,10 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
                ide_sysfs_register_port(hwif);
                ide_proc_register_port(hwif);
 
-               if (hwif->present)
+               if (hwif->present) {
                        ide_proc_port_register_devices(hwif);
+                       hwif_register_devices(hwif);
+               }
        }
 
        return j ? 0 : -1;
old mode 100755 (executable)
new mode 100644 (file)
index a10152b..0906fc5
@@ -83,7 +83,7 @@ static unsigned int mwait_substates;
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states;
 
-static struct cpuidle_device *intel_idle_cpuidle_devices;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C3",
                .desc = "MWAIT 0x10",
                .driver_data = (void *) 0x10,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 20,
                .power_usage = 500,
                .target_residency = 80,
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "NHM-C6",
                .desc = "MWAIT 0x20",
                .driver_data = (void *) 0x20,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
                .power_usage = 350,
                .target_residency = 800,
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "ATM-C4",
                .desc = "MWAIT 0x30",
                .driver_data = (void *) 0x30,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 100,
                .power_usage = 250,
                .target_residency = 400,
@@ -159,7 +159,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .name = "ATM-C6",
                .desc = "MWAIT 0x40",
                .driver_data = (void *) 0x40,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
                .power_usage = 150,
                .target_residency = 800,
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 
        local_irq_disable();
 
+       /*
+        * If the state flag indicates that the TLB will be flushed or if this
+        * is the deepest c-state supported, do a voluntary leave mm to avoid
+        * costly and mostly unnecessary wakeups for flushing the user TLB's
+        * associated with the active mm.
+        */
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
+           (&dev->states[dev->state_count - 1] == state))
+               leave_mm(cpu);
+
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index d88077a219944ec49e2f594de780d0f216f9201f..13c88871dc3b90f564a52b4651aa371a4cd15633 100644 (file)
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
        skb->priority = CPL_PRIORITY_SETUP;
        set_arp_failure_handler(skb, act_open_req_arp_failure);
 
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
            V_MSS_IDX(mtu_idx) |
            V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
        opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
-       opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+       opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+              V_CONG_CONTROL_FLAVOR(cong_flavor);
 
        rpl = cplhdr(skb);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
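
The two corrected writes above had been passing the ENABLE_PAUSE bit value
where a register index was expected, so the read-modify-write landed on the
wrong register. A small standalone model of the difference (made-up register
map and helpers, not the nes register layout):

#include <stdio.h>
#include <stdint.h>

#define MAC_TX_CONFIG                   0x2004  /* pretend register index */
#define MAC_TX_CONFIG_ENABLE_PAUSE      0x0800  /* bit within that register */

static uint32_t regs[0x4000];

static uint32_t read_indexed(uint32_t idx)              { return regs[idx]; }
static void write_indexed(uint32_t idx, uint32_t val)   { regs[idx] = val; }

int main(void)
{
        uint32_t v = read_indexed(MAC_TX_CONFIG) | MAC_TX_CONFIG_ENABLE_PAUSE;

        /* buggy pattern: bit value reused as the index -> wrong register */
        write_indexed(MAC_TX_CONFIG_ENABLE_PAUSE, v);
        printf("buggy: TX_CONFIG=0x%x (unchanged)\n",
               (unsigned)read_indexed(MAC_TX_CONFIG));

        /* fixed pattern: write back to the register that was read */
        write_indexed(MAC_TX_CONFIG, v);
        printf("fixed: TX_CONFIG=0x%x\n", (unsigned)read_indexed(MAC_TX_CONFIG));
        return 0;
}
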
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
Note: replaced "iby" with "by" in the line above; the sentence otherwise reads as in the patch.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..6e29badb969e44192be85080caf801851c3d06fa 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
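
The cast change above is a sign-extension fix: data[] holds unsigned bytes, so
after integer promotion a value such as 0xff is 255 and "(signed)" keeps it
positive, while "(signed char)" reinterprets it as -1 before negation. A
runnable illustration on a generic buffer (two's-complement targets assumed):

#include <stdio.h>

int main(void)
{
        unsigned char data[7] = { 0 };
        int rw_old, rw_new;

        data[6] = 0xff;                 /* a relative wheel step of -1 */

        rw_old = -(signed)data[6];      /* promoted to 255, negated: -255 */
        rw_new = -(signed char)data[6]; /* reinterpreted as -1, negated: 1 */

        printf("old cast: %d\n", rw_old);
        printf("new cast: %d\n", rw_new);
        return 0;
}
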
index 74dce4ba0262560977ae88d69fbb2fbe7db62009..350eb34f049c97520bb183d97f9f84f8864663b5 100644 (file)
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
        int cmd_level;
        int slow_level;
 
-       read_lock(&led_dat->rw_lock);
+       read_lock_irq(&led_dat->rw_lock);
 
        cmd_level = gpio_get_value(led_dat->cmd);
        slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       read_unlock(&led_dat->rw_lock);
+       read_unlock_irq(&led_dat->rw_lock);
 
        return ret;
 }
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                             enum ns2_led_modes mode)
 {
        int i;
+       unsigned long flags;
 
-       write_lock(&led_dat->rw_lock);
+       write_lock_irqsave(&led_dat->rw_lock, flags);
 
        for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
                if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
                }
        }
 
-       write_unlock(&led_dat->rw_lock);
+       write_unlock_irqrestore(&led_dat->rw_lock, flags);
 }
 
 static void ns2_led_set(struct led_classdev *led_cdev,
index 43cf9cc9c1df3650c228ce01920645fb474f105a..f20d13e717d55e0a04de747f62384d2ce4b0b310 100644 (file)
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
                if (rdev->sb_size & bmask)
                        rdev->sb_size = (rdev->sb_size | bmask) + 1;
-       }
+       } else
+               max_dev = le32_to_cpu(sb->max_dev);
+
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
        if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return;
        if ( ! (
-               (mddev->flags && !mddev->external) ||
+               (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                (mddev->external == 0 && mddev->safemode == 1) ||
index 04028a9ee082735278557b606840619ade7e29a9..428377a5a6f56fe94ca033730a49102f19164dd2 100644 (file)
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
        irq_tsc = cache_tsc;
        for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
                irq_data = &max8925_irqs[i];
+               /* 1 -- disable, 0 -- enable */
                switch (irq_data->mask_reg) {
                case MAX8925_CHG_IRQ1_MASK:
-                       irq_chg[0] &= irq_data->enable;
+                       irq_chg[0] &= ~irq_data->enable;
                        break;
                case MAX8925_CHG_IRQ2_MASK:
-                       irq_chg[1] &= irq_data->enable;
+                       irq_chg[1] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ1_MASK:
-                       irq_on[0] &= irq_data->enable;
+                       irq_on[0] &= ~irq_data->enable;
                        break;
                case MAX8925_ON_OFF_IRQ2_MASK:
-                       irq_on[1] &= irq_data->enable;
+                       irq_on[1] &= ~irq_data->enable;
                        break;
                case MAX8925_RTC_IRQ_MASK:
-                       irq_rtc &= irq_data->enable;
+                       irq_rtc &= ~irq_data->enable;
                        break;
                case MAX8925_TSC_IRQ_MASK:
-                       irq_tsc &= irq_data->enable;
+                       irq_tsc &= ~irq_data->enable;
                        break;
                default:
                        dev_err(chip->dev, "wrong IRQ\n");
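
The flipped operators above follow the convention noted in the new comment: a
set bit in these registers masks (disables) an interrupt, so enabling one means
clearing its bit while leaving the rest masked. A tiny standalone demonstration
with arbitrary values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t mask = 0xff;            /* everything masked (disabled) */
        uint8_t enable_bit = 0x04;      /* the one interrupt to enable */

        uint8_t buggy = mask & enable_bit;  /* 0x04: wanted IRQ stays masked,
                                               every other source gets unmasked */
        uint8_t fixed = mask & ~enable_bit; /* 0xfb: only the wanted IRQ unmasked */

        printf("buggy mask: 0x%02x\n", buggy);
        printf("fixed mask: 0x%02x\n", fixed);
        return 0;
}
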
index 7dabe4dbd3732e1d75c396b9b1e01bdeafafa57c..294183b6260b1facff3d26764eb3cea8c6d4b011 100644 (file)
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
 
        irq = irq - wm831x->irq_base;
 
-       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
-               return -EINVAL;
+       if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
+               /* Ignore internal-only IRQs */
+               if (irq >= 0 && irq < WM831X_NUM_IRQS)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
 
        switch (type) {
        case IRQ_TYPE_EDGE_BOTH:
index 0b591b658243a7675ac66d24b97e7ea1ceaa9781..b74331260744db8829a2a8712a0d08bf5d42f949 100644 (file)
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
          If unsure, say N.
 
          To compile this driver as a module, choose M here: the
-         module will be called vmware_balloon.
+         module will be called vmw_balloon.
 
 config ARM_CHARLCD
        bool "ARM Ltd. Character LCD Driver"
index 255a80dc9d73267a115da07c5729e04aa1c8a8ee..42eab95cde2af49e8ae737036695a1093caa01d9 100644 (file)
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-$(CONFIG_HMC6352)          += hmc6352.o
 obj-y                          += eeprom/
 obj-y                          += cb710/
-obj-$(CONFIG_VMWARE_BALLOON)   += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON)   += vmw_balloon.o
 obj-$(CONFIG_ARM_CHARLCD)      += arm-charlcd.o
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 71ad4163b95e12b45aab7d41379c8022995d8ac4..aacb862ecc8a979f8022b8e0dd3428ed26c15baa 100644 (file)
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
 {
        struct sdhci_host *host = platform_get_drvdata(dev);
+       unsigned long flags;
+
        if (host) {
-               spin_lock(&host->lock);
+               spin_lock_irqsave(&host->lock, flags);
                if (state) {
                        dev_dbg(&dev->dev, "card inserted.\n");
                        host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
                        host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
                }
                tasklet_schedule(&host->card_tasklet);
-               spin_unlock(&host->lock);
+               spin_unlock_irqrestore(&host->lock, flags);
        }
 }
 
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
        sdhci_remove_host(host, 1);
 
        for (ptr = 0; ptr < 3; ptr++) {
-               clk_disable(sc->clk_bus[ptr]);
-               clk_put(sc->clk_bus[ptr]);
+               if (sc->clk_bus[ptr]) {
+                       clk_disable(sc->clk_bus[ptr]);
+                       clk_put(sc->clk_bus[ptr]);
+               }
        }
        clk_disable(sc->clk_io);
        clk_put(sc->clk_io);
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
 
index a382e3dd0a5dc8cdcddc2fc5f0b993c3badadd33..6fbeefa3a7662fb5fb85e8bfdbb86e0c63307ab3 100644 (file)
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
 static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
 {
        struct bf5xx_nand_info *info = to_nand_info(pdev);
-       struct mtd_info *mtd = NULL;
 
        platform_set_drvdata(pdev, NULL);
 
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
         * and their partitions, then go through freeing the
         * resources used
         */
-       mtd = &info->mtd;
-       if (mtd) {
-               nand_release(mtd);
-               kfree(mtd);
-       }
+       nand_release(&info->mtd);
 
        peripheral_free_list(bfin_nfc_pin_req);
        bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
        struct nand_chip *chip = mtd->priv;
        int ret;
 
-       ret = nand_scan_ident(mtd, 1);
+       ret = nand_scan_ident(mtd, 1, NULL);
        if (ret)
                return ret;
 
index fcf8ceb277d44cd3e78b5c5e0249ec1b40d7ab33..b2828e84d24377fdbf2be2d9af3d2492ee7feea2 100644 (file)
@@ -67,7 +67,9 @@
 #define NFC_V1_V2_CONFIG1_BIG          (1 << 5)
 #define NFC_V1_V2_CONFIG1_RST          (1 << 6)
 #define NFC_V1_V2_CONFIG1_CE           (1 << 7)
-#define NFC_V1_V2_CONFIG1_ONE_CYCLE    (1 << 8)
+#define NFC_V2_CONFIG1_ONE_CYCLE       (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x)          (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT          (1 << 11)
 
 #define NFC_V1_V2_CONFIG2_INT          (1 << 15)
 
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
        /* Wait for operation to complete */
        wait_op_done(host, true);
 
+       memcpy(host->data_buf, host->main_area0, 16);
+
        if (this->options & NAND_BUSWIDTH_16) {
-               void __iomem *main_buf = host->main_area0;
                /* compress the ID info */
-               writeb(readb(main_buf + 2), main_buf + 1);
-               writeb(readb(main_buf + 4), main_buf + 2);
-               writeb(readb(main_buf + 6), main_buf + 3);
-               writeb(readb(main_buf + 8), main_buf + 4);
-               writeb(readb(main_buf + 10), main_buf + 5);
+               host->data_buf[1] = host->data_buf[2];
+               host->data_buf[2] = host->data_buf[4];
+               host->data_buf[3] = host->data_buf[6];
+               host->data_buf[4] = host->data_buf[8];
+               host->data_buf[5] = host->data_buf[10];
        }
-       memcpy(host->data_buf, host->main_area0, 16);
 }
 
 static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
 {
        struct nand_chip *nand_chip = mtd->priv;
        struct mxc_nand_host *host = nand_chip->priv;
-       uint16_t tmp;
-
-       /* enable interrupt, disable spare enable */
-       tmp = readw(NFC_V1_V2_CONFIG1);
-       tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
-       tmp &= ~NFC_V1_V2_CONFIG1_SP_EN;
-       if (nand_chip->ecc.mode == NAND_ECC_HW) {
-               tmp |= NFC_V1_V2_CONFIG1_ECC_EN;
-       } else {
-               tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN;
-       }
+       uint16_t config1 = 0;
+
+       if (nand_chip->ecc.mode == NAND_ECC_HW)
+               config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+       if (nfc_is_v21())
+               config1 |= NFC_V2_CONFIG1_FP_INT;
+
+       if (!cpu_is_mx21())
+               config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
 
        if (nfc_is_v21() && mtd->writesize) {
+               uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
                host->eccsize = get_eccsize(mtd);
                if (host->eccsize == 4)
-                       tmp |= NFC_V2_CONFIG1_ECC_MODE_4;
+                       config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+               config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
        } else {
                host->eccsize = 1;
        }
 
-       writew(tmp, NFC_V1_V2_CONFIG1);
+       writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
        /* Unlock the internal RAM Buffer */
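
The new NFC_V2_CONFIG1_PPB() field above is filled with ffs(pages_per_block) - 6,
so the common block geometries map onto the 2-bit field as 32/64/128/256 pages
per block -> 0..3. A standalone check of that encoding (geometries chosen only
for illustration):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int geom[][2] = {
                /* erasesize, writesize */
                {  64 * 1024, 2048 },   /*  32 pages per block */
                { 128 * 1024, 2048 },   /*  64 pages per block */
                { 256 * 1024, 2048 },   /* 128 pages per block */
                { 512 * 1024, 2048 },   /* 256 pages per block */
        };
        int i;

        for (i = 0; i < 4; i++) {
                unsigned int ppb = geom[i][0] / geom[i][1];
                printf("ppb=%3u -> PPB field=%d\n", ppb, ffs(ppb) - 6);
        }
        return 0;
}
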
index 133d51528f8dc0fb79eae4d12230e1a65bd7595e..513e0a76a4a73866d52bba8151e43556a3b30a54 100644 (file)
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
        } while (prefetch_status);
        /* disable and stop the PFPW engine */
-       gpmc_prefetch_reset();
+       gpmc_prefetch_reset(info->gpmc_cs);
 
        dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
        return 0;
index 4d89f37802075a26e602cf4ba6f42de54d07cfc8..4d01cda6884463daa844c02fde951970854e972a 100644 (file)
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
                goto fail_free_irq;
        }
 
+#ifdef CONFIG_MTD_PARTITIONS
        if (mtd_has_cmdlinepart()) {
                static const char *probes[] = { "cmdlinepart", NULL };
                struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        }
 
        return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+#else
+       return 0;
+#endif
 
 fail_free_irq:
        free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
        platform_set_drvdata(pdev, NULL);
 
        del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
        del_mtd_partitions(mtd);
+#endif
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0)
                free_irq(irq, info);
index cb443af3d45feee407bb79e8440f439cbdba7bd1..a460f1b748c20fbcb29982925820b79e6a7bbd78 100644 (file)
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
 
        do {
                status = readl(base + S5PC110_DMA_TRANS_STATUS);
+               if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+                       writel(S5PC110_DMA_TRANS_CMD_TEC,
+                                       base + S5PC110_DMA_TRANS_CMD);
+                       return -EIO;
+               }
        } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
 
-       if (status & S5PC110_DMA_TRANS_STATUS_TE) {
-               writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
-               writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
-               return -EIO;
-       }
-
        writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
 
        return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
                unsigned char *buffer, int offset, size_t count)
 {
        struct onenand_chip *this = mtd->priv;
-       void __iomem *bufferram;
        void __iomem *p;
        void *buf = (void *) buffer;
        dma_addr_t dma_src, dma_dst;
        int err;
 
-       p = bufferram = this->base + area;
+       p = this->base + area;
        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
 normal:
        if (count != mtd->writesize) {
                /* Copy the bufferram to memory to prevent unaligned access */
-               memcpy(this->page_buf, bufferram, mtd->writesize);
+               memcpy(this->page_buf, p, mtd->writesize);
                p = this->page_buf + offset;
        }
 
index bdf2149e529689b1135603904da6343fcbbbebc2..87f0a93b165c33478e1d59aa5e23942afaccc80f 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
index 5ae28c975b384bf64ff7a7dc763a3d1766669b5a..8cf9d4f56bb2223893dd912e1d475a48fc30f683 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/of_mdio.h>
 
index 49279b0ee526a54a534c6f8c04e7c48aa4aba0d0..f9b509a6b09a702d214ed2821d3bf6a79a671b49 100644 (file)
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
                           unsigned int vcc,
                           void *priv_data)
 {
-       int *has_shmem = priv_data;
+       int *priv = priv_data;
+       int try = (*priv & 0x1);
        int i;
        cistpl_io_t *io = &cfg->io;
 
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
                i = p_dev->resource[1]->end = 0;
        }
 
-       *has_shmem = ((cfg->mem.nwin == 1) &&
-                     (cfg->mem.win[0].len >= 0x4000));
+       *priv &= ((cfg->mem.nwin == 1) &&
+                 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
+
        p_dev->resource[0]->start = io->win[i].base;
        p_dev->resource[0]->end = io->win[i].len;
-       p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+       if (!try)
+               p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+       else
+               p_dev->io_lines = 16;
        if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
                return try_io_port(p_dev);
 
-       return 0;
+       return -EINVAL;
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+                                  int *has_shmem, int try)
+{
+       struct net_device *dev = link->priv;
+       hw_info_t *local_hw_info;
+       pcnet_dev_t *info = PRIV(dev);
+       int priv = try;
+       int ret;
+
+       ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+       if (ret) {
+               dev_warn(&link->dev, "no useable port range found\n");
+               return NULL;
+       }
+       *has_shmem = (priv & 0x10);
+
+       if (!link->irq)
+               return NULL;
+
+       if (resource_size(link->resource[1]) == 8) {
+               link->conf.Attributes |= CONF_ENABLE_SPKR;
+               link->conf.Status = CCSR_AUDIO_ENA;
+       }
+       if ((link->manf_id == MANFID_IBM) &&
+           (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+               link->conf.ConfigIndex |= 0x10;
+
+       ret = pcmcia_request_configuration(link, &link->conf);
+       if (ret)
+               return NULL;
+
+       dev->irq = link->irq;
+       dev->base_addr = link->resource[0]->start;
+
+       if (info->flags & HAS_MISC_REG) {
+               if ((if_port == 1) || (if_port == 2))
+                       dev->if_port = if_port;
+               else
+                       dev_notice(&link->dev, "invalid if_port requested\n");
+       } else
+               dev->if_port = 0;
+
+       if ((link->conf.ConfigBase == 0x03c0) &&
+           (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+               dev_info(&link->dev,
+                       "this is an AX88190 card - use axnet_cs instead.\n");
+               return NULL;
+       }
+
+       local_hw_info = get_hwinfo(link);
+       if (!local_hw_info)
+               local_hw_info = get_prom(link);
+       if (!local_hw_info)
+               local_hw_info = get_dl10019(link);
+       if (!local_hw_info)
+               local_hw_info = get_ax88190(link);
+       if (!local_hw_info)
+               local_hw_info = get_hwired(link);
+
+       return local_hw_info;
 }
 
 static int pcnet_config(struct pcmcia_device *link)
 {
     struct net_device *dev = link->priv;
     pcnet_dev_t *info = PRIV(dev);
-    int ret, start_pg, stop_pg, cm_offset;
+    int start_pg, stop_pg, cm_offset;
     int has_shmem = 0;
     hw_info_t *local_hw_info;
 
     dev_dbg(&link->dev, "pcnet_config\n");
 
-    ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
-    if (ret)
-       goto failed;
-
-    if (!link->irq)
-           goto failed;
-
-    if (resource_size(link->resource[1]) == 8) {
-       link->conf.Attributes |= CONF_ENABLE_SPKR;
-       link->conf.Status = CCSR_AUDIO_ENA;
-    }
-    if ((link->manf_id == MANFID_IBM) &&
-       (link->card_id == PRODID_IBM_HOME_AND_AWAY))
-       link->conf.ConfigIndex |= 0x10;
-
-    ret = pcmcia_request_configuration(link, &link->conf);
-    if (ret)
-           goto failed;
-    dev->irq = link->irq;
-    dev->base_addr = link->resource[0]->start;
-    if (info->flags & HAS_MISC_REG) {
-       if ((if_port == 1) || (if_port == 2))
-           dev->if_port = if_port;
-       else
-           printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
-    } else {
-       dev->if_port = 0;
-    }
-
-    if ((link->conf.ConfigBase == 0x03c0) &&
-       (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
-       printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
-       printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
-       goto failed;
-    }
-
-    local_hw_info = get_hwinfo(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_prom(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_dl10019(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_ax88190(link);
-    if (local_hw_info == NULL)
-       local_hw_info = get_hwired(link);
-
-    if (local_hw_info == NULL) {
-       printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
-              " address for io base %#3lx\n", dev->base_addr);
-       goto failed;
+    local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+    if (!local_hw_info) {
+           /* check whether forcing io_lines to 16 helps... */
+           pcmcia_disable_device(link);
+           local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+           if (local_hw_info == NULL) {
+                   dev_notice(&link->dev, "unable to read hardware net"
+                           " address for io base %#3lx\n", dev->base_addr);
+                   goto failed;
+           }
     }
 
     info->flags = local_hw_info->flags;
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
 
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index c3ceebb5be84168ae71b845bdc52b3c9a6b557b3..4789f8e8bf7ad91cae0b0a1043349ac96ea5fa64 100644 (file)
 #define DMA_32BIT_PFN          IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN          IOVA_PFN(DMA_BIT_MASK(64))
 
+/* page table handling */
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+       return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+       return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+       return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
 }
 
 
-static inline int width_to_agaw(int width);
-
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
        unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
-       return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-       return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-       return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
 {
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
 
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK   (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE   (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M     (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M     (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT  (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT  (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+       unsigned short ggc;
+
+       if (pci_read_config_word(dev, GGC, &ggc))
+               return;
+
+       if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               dmar_map_gfx = 0;
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
 /* On Tylersburg chipsets, some BIOSes have been known to enable the
    ISOCH DMAR unit for the Azalia sound device, but not give it any
    TLB entries, which causes it to deadlock. Check for that.  We do
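
As a sanity check on the page-table helpers that this patch moves above their
first user, here is a standalone computation of the per-level index, size and
alignment they produce with the 9-bit stride (sample pfn chosen arbitrarily):

#include <stdio.h>

#define LEVEL_STRIDE    (9)
#define LEVEL_MASK      (((unsigned long)1 << LEVEL_STRIDE) - 1)

static unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static unsigned long align_to_level(unsigned long pfn, int level)
{
        unsigned long mask = -1UL << level_to_offset_bits(level);

        return (pfn + level_size(level) - 1) & mask;
}

int main(void)
{
        unsigned long pfn = 0x12345;    /* arbitrary example page frame number */
        int level;

        for (level = 1; level <= 3; level++)
                printf("level %d: offset=%3d size=%8lu aligned=0x%lx\n",
                       level, pfn_level_offset(pfn, level),
                       level_size(level), align_to_level(pfn, level));
        return 0;
}
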
index ce6a3666b3d9878f70be6fb65f3e1272fdf6b6b2..553d8ee55c1c4aa3c9f6567867642d3505cf89df 100644 (file)
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
  * the VF BAR size multiplied by the number of VFs.  The alignment
  * is just the VF BAR size.
  */
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
        struct resource tmp;
        enum pci_bar_type type;
index 7754a678ab15cc77445d396aa59409658bf8006f..6beb11b617a92973f7a32cc47a7343a759218d6e 100644 (file)
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                                enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+                                                   int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
                                         struct resource *res)
 {
 #ifdef CONFIG_PCI_IOV
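
Both return-type changes above exist because a BAR alignment is a
resource_size_t quantity; squeezing it through an int silently truncates once
the alignment no longer fits in 32 bits. A small demonstration with a made-up
8 GB alignment, modelling resource_size_t as a 64-bit integer (as on kernels
with 64-bit physical addresses):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t resource_size_t;       /* stand-in for the kernel typedef */

static int alignment_as_int(resource_size_t a)                  { return (int)a; }
static resource_size_t alignment_as_resource(resource_size_t a) { return a; }

int main(void)
{
        resource_size_t align = 8ULL * 1024 * 1024 * 1024;      /* 8 GB */

        printf("as int:             %d\n",
               alignment_as_int(align));        /* truncated, typically 0 */
        printf("as resource_size_t: %llu\n",
               (unsigned long long)alignment_as_resource(align));
        return 0;
}
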
index 89ed181cd90cd9cd68506a212eb423ba60f9c8e2..857ae01734a66156c8abb92335be91cc674964a0 100644 (file)
@@ -162,6 +162,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1,       quirk_isa_d
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_2,       quirk_isa_dma_hangs);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,     PCI_DEVICE_ID_NEC_CBUS_3,       quirk_isa_dma_hangs);
 
+/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+       u32 pmbase;
+       u16 pm1a;
+
+       pci_read_config_dword(dev, 0x40, &pmbase);
+       pmbase = pmbase & 0xff80;
+       pm1a = inw(pmbase);
+
+       if (pm1a & 0x10) {
+               dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+               outw(0x10, pmbase);
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
 /*
  *     Chipsets where PCI->PCI transfers vanish or hang
  */
index 54aa1c238cb34a5966a1d8ff6621835846514d87..9ba4dade69a4d67cd5b0efc1e4c55a70dbf349c1 100644 (file)
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
 
        if (!(c->state & CONFIG_LOCKED)) {
-               dev_dbg(&s->dev, "Configuration isnt't locked\n");
+               dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
                mutex_unlock(&s->ops_mutex);
                return -EACCES;
        }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
        s->win[w].card_start = offset;
        ret = s->ops->set_mem_map(s, &s->win[w]);
        if (ret)
-               dev_warn(&s->dev, "failed to set_mem_map\n");
+               dev_warn(&p_dev->dev, "failed to set_mem_map\n");
        mutex_unlock(&s->ops_mutex);
        return ret;
 } /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "No card present\n");
+               dev_dbg(&p_dev->dev, "No card present\n");
                ret = -ENODEV;
                goto unlock;
        }
        if (!(c->state & CONFIG_LOCKED)) {
-               dev_dbg(&s->dev, "Configuration isnt't locked\n");
+               dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
                ret = -EACCES;
                goto unlock;
        }
 
        if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
-               dev_dbg(&s->dev,
+               dev_dbg(&p_dev->dev,
                        "changing Vcc or IRQ is not allowed at this time\n");
                ret = -EINVAL;
                goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
        if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
            (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
                if (mod->Vpp1 != mod->Vpp2) {
-                       dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
+                       dev_dbg(&p_dev->dev,
+                               "Vpp1 and Vpp2 must be the same\n");
                        ret = -EINVAL;
                        goto unlock;
                }
                s->socket.Vpp = mod->Vpp1;
                if (s->ops->set_socket(s, &s->socket)) {
-                       dev_printk(KERN_WARNING, &s->dev,
+                       dev_printk(KERN_WARNING, &p_dev->dev,
                                   "Unable to set VPP\n");
                        ret = -EIO;
                        goto unlock;
                }
        } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
                   (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
-               dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+               dev_dbg(&p_dev->dev,
+                       "changing Vcc is not allowed at this time\n");
                ret = -EINVAL;
                goto unlock;
        }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
        win = &s->win[w];
 
        if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
-               dev_dbg(&s->dev, "not releasing unknown window\n");
+               dev_dbg(&p_dev->dev, "not releasing unknown window\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
                return -ENODEV;
 
        if (req->IntType & INT_CARDBUS) {
-               dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
+               dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
                return -EINVAL;
        }
 
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
        c = p_dev->function_config;
        if (c->state & CONFIG_LOCKED) {
                mutex_unlock(&s->ops_mutex);
-               dev_dbg(&s->dev, "Configuration is locked\n");
+               dev_dbg(&p_dev->dev, "Configuration is locked\n");
                return -EACCES;
        }
 
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
        s->socket.Vpp = req->Vpp;
        if (s->ops->set_socket(s, &s->socket)) {
                mutex_unlock(&s->ops_mutex);
-               dev_printk(KERN_WARNING, &s->dev,
+               dev_printk(KERN_WARNING, &p_dev->dev,
                           "Unable to set socket state\n");
                return -EINVAL;
        }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        int ret = -EINVAL;
 
        mutex_lock(&s->ops_mutex);
-       dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]);
+       dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
+               &c->io[0], &c->io[1]);
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "pcmcia_request_io: No card present\n");
+               dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
                goto out;
        }
 
        if (c->state & CONFIG_LOCKED) {
-               dev_dbg(&s->dev, "Configuration is locked\n");
+               dev_dbg(&p_dev->dev, "Configuration is locked\n");
                goto out;
        }
        if (c->state & CONFIG_IO_REQ) {
-               dev_dbg(&s->dev, "IO already configured\n");
+               dev_dbg(&p_dev->dev, "IO already configured\n");
                goto out;
        }
 
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        if (c->io[1].end) {
                ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
                if (ret) {
+                       struct resource tmp = c->io[0];
+                       /* release the previously allocated resource */
                        release_io_space(s, &c->io[0]);
+                       /* but preserve the settings, for they worked... */
+                       c->io[0].end = resource_size(&tmp);
+                       c->io[0].start = tmp.start;
+                       c->io[0].flags = tmp.flags;
                        goto out;
                }
        } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
        c->state |= CONFIG_IO_REQ;
        p_dev->_io = 1;
 
-       dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR",
+       dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
                &c->io[0], &c->io[1]);
 out:
        mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        int w;
 
        if (!(s->state & SOCKET_PRESENT)) {
-               dev_dbg(&s->dev, "No card present\n");
+               dev_dbg(&p_dev->dev, "No card present\n");
                return -ENODEV;
        }
 
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                req->Size = s->map_size;
        align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
        if (req->Size & (s->map_size-1)) {
-               dev_dbg(&s->dev, "invalid map size\n");
+               dev_dbg(&p_dev->dev, "invalid map size\n");
                return -EINVAL;
        }
        if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
            (req->Base & (align-1))) {
-               dev_dbg(&s->dev, "invalid base address\n");
+               dev_dbg(&p_dev->dev, "invalid base address\n");
                return -EINVAL;
        }
        if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                if (!(s->state & SOCKET_WIN_REQ(w)))
                        break;
        if (w == MAX_WIN) {
-               dev_dbg(&s->dev, "all windows are used already\n");
+               dev_dbg(&p_dev->dev, "all windows are used already\n");
                mutex_unlock(&s->ops_mutex);
                return -EINVAL;
        }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
                win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
                                                0, s);
                if (!win->res) {
-                       dev_dbg(&s->dev, "allocating mem region failed\n");
+                       dev_dbg(&p_dev->dev, "allocating mem region failed\n");
                        mutex_unlock(&s->ops_mutex);
                        return -EINVAL;
                }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        win->card_start = 0;
 
        if (s->ops->set_mem_map(s, win) != 0) {
-               dev_dbg(&s->dev, "failed to set memory mapping\n");
+               dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
                mutex_unlock(&s->ops_mutex);
                return -EIO;
        }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
        if (win->res)
                request_resource(&iomem_resource, res);
 
-       dev_dbg(&s->dev, "request_window results in %pR\n", res);
+       dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
 
        mutex_unlock(&s->ops_mutex);
        *wh = res;
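
The PCMCIA core hunks above do two things: debug output is now attributed to the PCMCIA device (&p_dev->dev) rather than the socket, and when the second I/O window cannot be allocated, the first window's parameters are put back after release_io_space() so a later configuration attempt can reuse values that were known to work. Before allocation the resource end field appears to carry the requested length (see link->resource[0]->end = 8 in the serial_cs hunks later in this diff), which is why the restore writes resource_size(&tmp) into io[0].end. A minimal standalone sketch of that snapshot-and-restore, with release_window() standing in for release_io_space() and all names illustrative:

    #include <stdio.h>

    struct resource { unsigned long start, end, flags; };

    static unsigned long resource_size(const struct resource *r)
    {
            return r->end - r->start + 1;
    }

    /* stand-in for release_io_space(): clobbers the window fields */
    static void release_window(struct resource *r)
    {
            r->start = r->end = r->flags = 0;
    }

    int main(void)
    {
            struct resource io0 = { .start = 0x300, .end = 0x307, .flags = 0x1 };
            struct resource tmp = io0;              /* snapshot before releasing */

            release_window(&io0);

            io0.end   = resource_size(&tmp);        /* back to "length" form */
            io0.start = tmp.start;
            io0.flags = tmp.flags;

            printf("restored: start=%#lx len=%lu flags=%#lx\n",
                   io0.start, io0.end, io0.flags);
            return 0;
    }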
index b8a869af0f4410dd25a3cb0b4b5ace093ea5d6a5..deef6656ab7b8e2015e382ed48b85e067c7503a5 100644 (file)
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
        if (!pci_resource_start(dev, 0)) {
                dev_warn(&dev->dev, "refusing to load the driver as the "
                        "io_base is NULL.\n");
-               goto err_out_free_mem;
+               goto err_out_disable;
        }
 
        dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
index e35ed128bdef439313a107106f36c47e03dc5b7e..2d61186ad5a2e96708fcec4beb0a8402eb2bc09f 100644 (file)
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
        TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
 };
 
-typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
 
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        };
 
 #define TPACPI_HOTKEY_MAP_SIZE         sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_t[0])
+#define TPACPI_HOTKEY_MAP_TYPESIZE     sizeof(tpacpi_keymap_entry_t)
 
        int res, i;
        int status;
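
The typedef split matters because TPACPI_HOTKEY_MAP_TYPESIZE was computed as sizeof(tpacpi_keymap_t[0]). Since tpacpi_keymap_t is an array typedef, that operand parses as a type name, "array of zero tpacpi_keymap_t elements", which GCC's zero-length-array extension evaluates to size 0 rather than the size of one u16 entry. Naming the entry type and taking sizeof of that yields the intended per-element size. A small host-side check with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t keymap_entry_t;
    typedef keymap_entry_t keymap_t[4];     /* array typedef, as in the driver */

    int main(void)
    {
            /* "keymap_t[0]" is a type here: a zero-length array of keymap_t,
             * accepted by GCC as an extension and reported as size 0 */
            printf("sizeof(keymap_t)       = %zu\n", sizeof(keymap_t));       /* 8 */
            printf("sizeof(keymap_t[0])    = %zu\n", sizeof(keymap_t[0]));    /* 0 */
            printf("sizeof(keymap_entry_t) = %zu\n", sizeof(keymap_entry_t)); /* 2 */
            return 0;
    }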
index 936bae560fa1f730255bde8a94c9493c953efdfd..dc628cb2e762803b6f3374d33a99feae5b08640c 100644 (file)
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
                empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
                now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
                avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+               break;
        case SOURCE_VOLTAGE:
                full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
                empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
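
The added break matters because the switch in calculate_capacity() otherwise falls through: after filling in the properties for the preceding source, execution continues into the SOURCE_VOLTAGE case and silently overwrites them with the voltage properties. A minimal fall-through demonstration, using generic names rather than the driver's:

    #include <stdio.h>

    enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

    int main(void)
    {
            enum source src = SOURCE_ENERGY;
            const char *prop = "none";

            switch (src) {
            case SOURCE_ENERGY:
                    prop = "energy";
                    /* missing break: falls through and clobbers prop */
            case SOURCE_VOLTAGE:
                    prop = "voltage";
                    break;
            }
            printf("selected property: %s\n", prop);   /* prints "voltage" */
            return 0;
    }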
index c61ffec2ff106ca7aa153dac55cebf5a77b495b1..2a10cd361181292f9d6bbef7ef38cda9772c839b 100644 (file)
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
 {
        u32 data[3];
        u8 *p = (u8 *)&data[1];
-       int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY,
-                               IPCMSG_BATTERY, NULL, 0, data, 3);
+       int err = intel_scu_ipc_command(IPCMSG_BATTERY,
+                               IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
 
        prop->capacity = data[0];
        prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
 
 static int pmic_scu_ipc_set_charger(int charger)
 {
-       return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY);
+       return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
 }
 
 /**
index 7d149a8d8d9b6cb7fe92f73b71536a723cbf6249..2ce2eb71d0f5be88e03495ea1de06e16902a461a 100644 (file)
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
        struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
        int ret = -EINVAL;
 
-       if (info->vol_table && (index < (2 << info->vol_nbits))) {
+       if (info->vol_table && (index < (1 << info->vol_nbits))) {
                ret = info->vol_table[index];
                if (info->slope_double)
                        ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
                max_uV = max_uV >> 1;
        }
        if (info->vol_table) {
-               for (i = 0; i < (2 << info->vol_nbits); i++) {
+               for (i = 0; i < (1 << info->vol_nbits); i++) {
                        if (!info->vol_table[i])
                                break;
                        if ((min_uV <= info->vol_table[i])
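
Both hunks correct the same arithmetic: an n-bit selector field can encode 2^n values, i.e. 1 << vol_nbits table entries, whereas 2 << vol_nbits is 2^(n+1) and lets the index run past the end of vol_table. For vol_nbits = 4 that is 16 valid selectors, not 32. A quick check:

    #include <stdio.h>

    int main(void)
    {
            for (int nbits = 1; nbits <= 4; nbits++)
                    printf("nbits=%d  1<<n=%2d  2<<n=%2d\n",
                           nbits, 1 << nbits, 2 << nbits);
            return 0;
    }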
index 11790990277a3dbd9a96ca2ba398f505480f5e19..b349266a43de63c150b92ab36c8b49274958392b 100644 (file)
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
                                "%s: failed to register regulator %s err %d\n",
                                __func__, ab3100_regulator_desc[i].name,
                                err);
-                       i--;
                        /* remove the already registered regulators */
-                       while (i > 0) {
+                       while (--i >= 0)
                                regulator_unregister(ab3100_regulators[i].rdev);
-                               i--;
-                       }
                        return err;
                }
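
The rewritten unwind loop fixes the cleanup bounds: with the old "i--; while (i > 0)" form, a failure at index i unregistered indices i-1 down to 1 but never index 0, leaking the first registered regulator. "while (--i >= 0)" visits every already-registered entry exactly once. A compact comparison, with a hypothetical unregister() stub:

    #include <stdio.h>

    static void unregister(int idx) { printf(" %d", idx); }

    int main(void)
    {
            int fail_at = 3, i;

            printf("old loop unwinds:");
            i = fail_at;
            i--;
            while (i > 0) {         /* never reaches index 0 */
                    unregister(i);
                    i--;
            }

            printf("\nnew loop unwinds:");
            i = fail_at;
            while (--i >= 0)        /* 2, 1, 0 */
                    unregister(i);
            printf("\n");
            return 0;
    }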
 
index dc3f1a491675abe371a483f862b01b60d1489a47..28c7ae67cec9ea1d573c94b30d8732f7770340b0 100644 (file)
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
        if (info->fixed_uV)
                return info->fixed_uV;
 
-       if (selector > info->voltages_len)
+       if (selector >= info->voltages_len)
                return -EINVAL;
 
        return info->supported_voltages[selector];
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
        struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
-       struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev);
+       struct ab8500_platform_data *pdata;
        int i, err;
 
        if (!ab8500) {
                dev_err(&pdev->dev, "null mfd parent\n");
                return -EINVAL;
        }
+       pdata = dev_get_platdata(ab8500->dev);
 
        /* register all regulators */
        for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
                                        info->desc.name);
                        /* when we fail, un-register all earlier regulators */
-                       i--;
-                       while (i > 0) {
+                       while (--i >= 0) {
                                info = &ab8500_regulator_info[i];
                                regulator_unregister(info->regulator);
-                               i--;
                        }
                        return err;
                }
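
Besides the same unwind-loop fix as in ab3100, the probe change moves the dev_get_platdata() call below the NULL test: in the old code the pointer was dereferenced in the initializer, so the "if (!ab8500)" check could only run after a potential NULL dereference. The general shape of the fix, in a standalone sketch with made-up names:

    #include <stdio.h>

    struct parent { int pdata; };

    /* may legitimately return NULL, like a missing mfd parent */
    static struct parent *get_parent(int ok)
    {
            static struct parent p = { .pdata = 42 };
            return ok ? &p : NULL;
    }

    static int probe(int ok)
    {
            struct parent *parent = get_parent(ok);
            int pdata;

            if (!parent)            /* check first ...                 */
                    return -1;
            pdata = parent->pdata;  /* ... dereference only afterwards */

            printf("pdata = %d\n", pdata);
            return 0;
    }

    int main(void)
    {
            printf("probe(good parent) -> %d\n", probe(1));
            printf("probe(no parent)   -> %d\n", probe(0));
            return 0;
    }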
index d59d2f2314afc999b8c9b6b9b9eb1c68df02b08f..df1fb53c09d266ff9369add1303c47f3d24702ab 100644 (file)
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
        unsigned int current_level;
        unsigned int current_mask;
        unsigned int current_offset;
-       struct regulator_dev rdev;
+       struct regulator_dev *rdev;
 };
 
 static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
 static int __devinit ad5398_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
 {
-       struct regulator_dev *rdev;
        struct regulator_init_data *init_data = client->dev.platform_data;
        struct ad5398_chip_info *chip;
        const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
        chip->current_offset = df->current_offset;
        chip->current_mask = (chip->current_level - 1) << chip->current_offset;
 
-       rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip);
-       if (IS_ERR(rdev)) {
-               ret = PTR_ERR(rdev);
+       chip->rdev = regulator_register(&ad5398_reg, &client->dev,
+                                       init_data, chip);
+       if (IS_ERR(chip->rdev)) {
+               ret = PTR_ERR(chip->rdev);
                dev_err(&client->dev, "failed to register %s %s\n",
                        id->name, ad5398_reg.name);
                goto err;
@@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client)
 {
        struct ad5398_chip_info *chip = i2c_get_clientdata(client);
 
-       regulator_unregister(&chip->rdev);
+       regulator_unregister(chip->rdev);
        kfree(chip);
        i2c_set_clientdata(client, NULL);
 
index 422a709d271d51d593899db82b0b930cf93f2847..cc8b337b9119de5e955aabe1935ad931a895c71a 100644 (file)
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
            constraints->min_uA != constraints->max_uA) {
                ret = _regulator_get_current_limit(rdev);
                if (ret > 0)
-                       count += sprintf(buf + count, "at %d uA ", ret / 1000);
+                       count += sprintf(buf + count, "at %d mA ", ret / 1000);
        }
 
        if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        dev_set_name(&rdev->dev, "regulator.%d",
                     atomic_inc_return(&regulator_no) - 1);
        ret = device_register(&rdev->dev);
-       if (ret != 0)
+       if (ret != 0) {
+               put_device(&rdev->dev);
                goto clean;
+       }
 
        dev_set_drvdata(&rdev->dev, rdev);
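
Two small but real fixes in the regulator core: the current limit is kept in microamps, so printing ret / 1000 means the unit is mA, not uA; and when device_register() fails the driver core may already hold a reference on the struct device, so the driver-core documentation asks for put_device() instead of jumping straight to the free path. A userspace-sized sketch of that "put, don't free" rule, with all names mine:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static void get_obj(struct obj *o) { o->refs++; }

    static void put_obj(struct obj *o)
    {
            if (--o->refs == 0) {
                    printf("object released\n");
                    free(o);
            }
    }

    /* stand-in for device_register(): may fail after touching refcounts */
    static int register_obj(struct obj *o)
    {
            get_obj(o);
            /* ... partial setup fails somewhere in here ... */
            put_obj(o);
            return -1;
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return 1;
            o->refs = 1;                    /* caller's reference         */
            if (register_obj(o) != 0)
                    put_obj(o);             /* drop it; never free() here */
            return 0;
    }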
 
index e49d2bd393f27cdd105f0ba9a16e7a9c96b99c62..d61ecb885a8c94e857e78dcb23452ee9540385f0 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
        mutex_init(&pmic->mtx);
 
        for (i = 0; i < 3; i++) {
-               pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev,
+               pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
                                                init_data, pmic);
                if (IS_ERR(pmic->rdev[i])) {
                        dev_err(&i2c->dev, "failed to register %s\n", id->name);
index 8867c2710a6d07319d9ad1b6d926922b280f30dc..559cfa271a4452389577be87543ce6116bfc0ebf 100644 (file)
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
        if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
                return -EINVAL;
 
-       if (min_uV >= 3000000)
-               selector = 3;
-       if (min_uV < 3000000)
-               selector = 2;
-       if (min_uV < 2500000)
-               selector = 1;
        if (min_uV < 1800000)
                selector = 0;
+       else if (min_uV < 2500000)
+               selector = 1;
+       else if (min_uV < 3000000)
+               selector = 2;
+       else if (min_uV >= 3000000)
+               selector = 3;
 
        if (max1586_v6_calc_voltage(selector) > max_uV)
                return -EINVAL;
index 4520ace3f7e707f82ccbf0fb068df921dfc6c2df..6b60a9c0366b3c5236fa7019844274c8b1155b3e 100644 (file)
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
                /* set external clock frequency */
                info->extclk_freq = pdata->extclk_freq;
                max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
-                                info->extclk_freq);
+                                info->extclk_freq << 6);
        }
 
        if (pdata->ramp_timing) {
index ab67298799f95a573f7a901199ac966cdc79847b..a1baf1fbe00472e71845591e2d427715a720a226 100644 (file)
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
        if (!max8998)
                return -ENOMEM;
 
-       size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1);
+       size = sizeof(struct regulator_dev *) * pdata->num_regulators;
        max8998->rdev = kzalloc(size, GFP_KERNEL);
        if (!max8998->rdev) {
                kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
        }
 
        rdev = max8998->rdev;
+       max8998->dev = &pdev->dev;
        max8998->iodev = iodev;
+       max8998->num_regulators = pdata->num_regulators;
        platform_set_drvdata(pdev, max8998);
 
        for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
 
        return 0;
 err:
-       for (i = 0; i <= max8998->num_regulators; i++)
+       for (i = 0; i < max8998->num_regulators; i++)
                if (rdev[i])
                        regulator_unregister(rdev[i]);
 
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
        struct regulator_dev **rdev = max8998->rdev;
        int i;
 
-       for (i = 0; i <= max8998->num_regulators; i++)
+       for (i = 0; i < max8998->num_regulators; i++)
                if (rdev[i])
                        regulator_unregister(rdev[i]);
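
Three related fixes in the max8998 probe path: the rdev array only needs num_regulators slots (the extra +1 element was never a valid entry), the driver state now records num_regulators and the dev pointer before the cleanup paths use them, and both cleanup loops switch from "i <= n" to "i < n" so they stop at the last valid index instead of touching one element past the allocation. The off-by-one in isolation:

    #include <stdio.h>

    int main(void)
    {
            const int n = 4;        /* pdata->num_regulators, say */

            printf("i <  n visits:");
            for (int i = 0; i < n; i++)
                    printf(" %d", i);

            printf("\ni <= n visits:");
            for (int i = 0; i <= n; i++)
                    printf(" %d", i);
            printf("   <-- index %d is past a %d-element array\n", n, n);
            return 0;
    }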
 
index c239f42aa4a3efa4ba47743f3d031c00f750a81a..020f5878d7fff19bb35f58b7cb2745d10beeb484 100644 (file)
@@ -626,12 +626,6 @@ fail:
        return error;
 }
 
-/**
- * tps6507x_remove - TPS6507x driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
 static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
index 8cff1413a147f1822336160602c57d253f3d1902..51237fbb1bbb7e15f952296a9ffcb9f668418fc9 100644 (file)
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
        mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
        val = (val & mask) >> ri->volt_shift;
 
-       if (val > ri->desc.n_voltages)
+       if (val >= ri->desc.n_voltages)
                BUG();
 
        return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
        if (ret)
                return ret;
 
-       return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit);
+       return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
 }
 
 static int tps6586x_regulator_enable(struct regulator_dev *rdev)
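
Two bounds/encoding fixes here: a selector equal to n_voltages is already out of range (valid indices run 0 .. n_voltages-1), and the change to 1 << ri->go_bit indicates tps6586x_set_bits() expects a bit mask, so passing the raw bit number sets the wrong bits. For go_bit = 3, passing 3 would set bits 0 and 1, while 1 << 3 sets bit 3:

    #include <stdio.h>

    int main(void)
    {
            unsigned int go_bit = 3;

            printf("mask from raw index: 0x%x\n", go_bit);        /* 0x3: bits 0,1 */
            printf("mask from 1 << bit:  0x%x\n", 1u << go_bit);  /* 0x8: bit 3    */
            return 0;
    }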
index e686cdb61b97cd8a54ab7fbecb1312a41e333595..9edf8f692341d89ed3645459da80be221c94eeec 100644 (file)
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
 
        case REGULATOR_MODE_IDLE:
                ret = wm831x_set_bits(wm831x, ctrl_reg,
-                                     WM831X_LDO1_LP_MODE,
-                                     WM831X_LDO1_LP_MODE);
+                                     WM831X_LDO1_LP_MODE, 0);
                if (ret < 0)
                        return ret;
 
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
                                      WM831X_LDO1_ON_MODE);
                if (ret < 0)
                        return ret;
+               break;
 
        case REGULATOR_MODE_STANDBY:
                ret = wm831x_set_bits(wm831x, ctrl_reg,
-                                     WM831X_LDO1_LP_MODE, 0);
+                                     WM831X_LDO1_LP_MODE,
+                                     WM831X_LDO1_LP_MODE);
                if (ret < 0)
                        return ret;
 
index 0e6ed7db93643436eadaeee5fbc48b7fd8ed599f..fe4b8a8a9dfd43a88ba9df10a9496a1b732d4328 100644 (file)
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
                        mode = REGULATOR_MODE_NORMAL;
        } else if (!active && !sleep)
                mode = REGULATOR_MODE_IDLE;
-       else if (!sleep)
+       else if (sleep)
                mode = REGULATOR_MODE_STANDBY;
 
        return mode;
index d26780ea254b5d9e9e296309dc23f2640418a7f5..261a07e0fb24c0dd7cd1d05d3d60c35d184b1e38 100644 (file)
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
                err = PTR_ERR(rtc);
                return err;
        }
+       platform_set_drvdata(pdev, rtc);
 
        return 0;
 }
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
 
        rtc_device_unregister(rtc);
+       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens every RTC 1HZ.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index a0d3ec89d412ac57d683fa5a446720a375dfab22..f57a87f4ae96abb367a2e08d353378b31f2a19fa 100644 (file)
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        s3c_rtc_setaie(alrm->enabled);
 
-       if (alrm->enabled)
-               enable_irq_wake(s3c_rtc_alarmno);
-       else
-               disable_irq_wake(s3c_rtc_alarmno);
-
        return 0;
 }
 
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                ticnt_en_save &= S3C64XX_RTCCON_TICEN;
        }
        s3c_rtc_enable(pdev, 0);
+
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
                tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
                writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(s3c_rtc_alarmno);
+
        return 0;
 }
 #else
index b7de02525ec901ebbee390b2798043eb1938f4bd..85cf607fc78f62d243ae41b20f2df018710be2e0 100644 (file)
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
        if (!blkdat->request_queue)
                return -ENOMEM;
 
-       elevator_exit(blkdat->request_queue->elevator);
-       rc = elevator_init(blkdat->request_queue, "noop");
+       rc = elevator_change(blkdat->request_queue, "noop");
        if (rc)
                goto cleanup_queue;
 
index 7d4d2275573c138d9e1c5223e41928defd4649aa..7f11f3e48e120ec82f1d7e26a0f1055fca771f97 100644 (file)
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
                           enum iscsi_host_param param, char *buf)
 {
        struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
-       int len = 0;
-       int status;
+       int status = 0;
 
        SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
        switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        default:
                return iscsi_host_get_param(shost, param, buf);
        }
-       return len;
+       return status;
 }
 
 int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
index 26350e470bccf71ea47b8dd99f387a31aff21670..877324fc594c28b606db82a12ab9b3559615ecbd 100644 (file)
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        memset(req, 0, sizeof(*req));
        wrb->tag0 |= tag;
 
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
                           sizeof(*req));
index cd05e049d5f6ad7164c6485ee336db00933c63b0..d0c82340f0e25198d0c56101c76d0d1a00b17254 100644 (file)
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
 {
        struct scsi_sense_hdr sshdr;
 
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
        scsi_show_sense_hdr(&sshdr);
        scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                 &sshdr);
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
 }
 EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
 
 void scsi_print_result(struct scsi_cmnd *cmd)
 {
-       scmd_printk(KERN_INFO, cmd, "");
+       scmd_printk(KERN_INFO, cmd, " ");
        scsi_show_result(cmd->result);
 }
 EXPORT_SYMBOL(scsi_print_result);
index 4f5551b5fe53d29a1a0473d3d5d799dabb46e0d5..c5d0606ad0974edab8c0326f4fadff905413c171 100644 (file)
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
index fda4de3440c4640f6754a1a66d143aeb5da8936c..e88bbdde49c5066b4bfb9fa2d8b5822115777cd8 100644 (file)
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+       WARN_ON(bio->bi_rw & REQ_WRITE);
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
index 420238cc794eb7dd69dbe132bdf6be29752ed644..114bc5a81171993ac91c27ba600cee72adb9aed9 100644 (file)
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
        qla24xx_disable_vp(vha);
 
+       vha->flags.delete_progress = 1;
+
        fc_remove_host(vha->host);
 
        scsi_remove_host(vha->host);
 
-       qla2x00_free_fcports(vha);
+       if (vha->timer_active) {
+               qla2x00_vp_stop_timer(vha);
+               DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+               " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+       }
 
        qla24xx_deallocate_vp_id(vha);
 
+       /* No pending activities shall be there on the vha now */
+       DEBUG(msleep(random32()%10));  /* Just to see if something falls on
+                                       * the net we have placed below */
+
+       BUG_ON(atomic_read(&vha->vref_count));
+
+       qla2x00_free_fcports(vha);
+
        mutex_lock(&ha->vport_lock);
        ha->cur_vport_count--;
        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
 
-       if (vha->timer_active) {
-               qla2x00_vp_stop_timer(vha);
-               DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
-                   "has stopped\n",
-                   vha->host_no, vha->vp_idx, vha));
-        }
-
        if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
                if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
index 6cfc28a25eb3c41cca4f24b91ba575fd8d12c48d..b74e6b5743dc2931cfefa730beb351f1fa60ea57 100644 (file)
@@ -29,8 +29,6 @@
 /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
 /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
 
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
 /*
 * Macros use for debugging the driver.
 */
index 3a432ea0c7a3548844dd4013fd06798396135a70..d2a4e1530708add3659525948aec20ce13629f91 100644 (file)
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
 #define MBX_UPDATE_FLASH_ACTIVE        3
 
        struct mutex vport_lock;        /* Virtual port synchronization */
+       spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
                uint32_t        management_server_logged_in :1;
                uint32_t        process_response_queue  :1;
                uint32_t        difdix_supported:1;
+               uint32_t        delete_progress:1;
        } flags;
 
        atomic_t        loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
+
+       atomic_t        vref_count;
 } scsi_qla_host_t;
 
 /*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
         atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
+       atomic_inc(&__vha->vref_count);                      \
+       mb();                                                \
+       if (__vha->flags.delete_progress) {                  \
+               atomic_dec(&__vha->vref_count);              \
+               __bail = 1;                                  \
+       } else {                                             \
+               __bail = 0;                                  \
+       }                                                    \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {                   \
+       atomic_dec(&__vha->vref_count);                      \
+} while (0)
+
+
 #define qla_printk(level, ha, format, arg...) \
        dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
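
The new vref_count / delete_progress pair implements a simple busy-reference scheme: work that is about to use a vha bumps vref_count, then re-checks delete_progress (the mb() keeps the increment visible before the flag is read); if a delete has started, the reference is dropped again and the caller bails out. The deletion side, shown later in the qla24xx_deallocate_vp_id() hunk, sets the flag and waits for vref_count to drain before tearing the vport down. A userspace rendering of the same idea with C11 atomics; the names are mine, not the driver's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vport {
            atomic_int  ref_count;          /* in-flight users           */
            atomic_bool delete_progress;    /* set once teardown started */
    };

    /* QLA_VHA_MARK_BUSY analogue: returns false when the caller must bail */
    static bool mark_busy(struct vport *v)
    {
            atomic_fetch_add(&v->ref_count, 1);   /* seq_cst, like inc + mb() */
            if (atomic_load(&v->delete_progress)) {
                    atomic_fetch_sub(&v->ref_count, 1);
                    return false;
            }
            return true;
    }

    static void mark_not_busy(struct vport *v)
    {
            atomic_fetch_sub(&v->ref_count, 1);
    }

    int main(void)
    {
            struct vport v = { 0 };

            if (mark_busy(&v)) {            /* normal path: do work, then drop */
                    printf("working, refs=%d\n", atomic_load(&v.ref_count));
                    mark_not_busy(&v);
            }

            atomic_store(&v.delete_progress, true);
            printf("after delete starts, mark_busy() -> %d\n", mark_busy(&v));
            return 0;
    }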
 
index d863ed2619b56853f7d29250dfd17b876fb0ee5c..9c383baebe279d0c27bb3027dc1bd337ab52603e 100644 (file)
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
 {
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
+       struct scsi_qla_host *vha = sp->fcport->vha;
 
        del_timer_sync(&iocb->timer);
        kfree(iocb);
        kfree(ctx);
        mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+       QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
 inline srb_t *
 qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
     unsigned long tmo)
 {
-       srb_t *sp;
+       srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct srb_ctx *ctx;
        struct srb_iocb *iocb;
+       uint8_t bail;
+
+       QLA_VHA_MARK_BUSY(vha, bail);
+       if (bail)
+               return NULL;
 
        sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
        if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
        iocb->timer.function = qla2x00_ctx_sp_timeout;
        add_timer(&iocb->timer);
 done:
+       if (!sp)
+               QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
 }
 
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                qla2x00_init_response_q_entries(rsp);
        }
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Clear RSCN queue. */
        list_for_each_entry(vp, &ha->vp_list, list) {
                vp->rscn_in_ptr = 0;
                vp->rscn_out_ptr = 0;
        }
+
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        ha->isp_ops->config_rings(vha);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                /* Bypass virtual ports of the same host. */
                found = 0;
                if (ha->num_vhosts) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
                        list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                                if (new_fcport->d_id.b24 == vp->d_id.b24) {
                                        found = 1;
                                        break;
                                }
                        }
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        if (found)
                                continue;
                }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
        struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        rval = QLA_SUCCESS;
 
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                /* Check for loop ID being already in use. */
                found = 0;
                fcport = NULL;
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
                list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        list_for_each_entry(fcport, &vp->vp_fcports, list) {
                                if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
                        if (found)
                                break;
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
 
                /* If not in use then it is free to use. */
                if (!found) {
@@ -3791,14 +3814,27 @@ void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
        fc_port_t *fcport;
-       struct scsi_qla_host *tvp, *vha;
+       struct scsi_qla_host *vha;
+       struct qla_hw_data *ha = base_vha->hw;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Go with deferred removal of rport references. */
-       list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
-               list_for_each_entry(fcport, &vha->vp_fcports, list)
+       list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+               atomic_inc(&vha->vref_count);
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (fcport && fcport->drport &&
-                           atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+                           atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_rport_del(fcport);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                       }
+               }
+               atomic_dec(&vha->vref_count);
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        vha->flags.online = 0;
        ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
-               list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        qla2x00_mark_all_devices_lost(vp, 0);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        uint8_t        status = 0;
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
        struct req_que *req = ha->req_q_map[0];
+       unsigned long flags;
 
        if (vha->flags.online) {
                qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                                "qla2x00_abort_isp(%ld): succeeded.\n",
                                vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
        struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags;
 
        status = qla2x00_init_rings(vha);
        if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                DEBUG(printk(KERN_INFO
                        "qla82xx_restart_isp(%ld): succeeded.\n",
                        vha->host_no));
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       if (vp->vp_idx)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp->vp_idx) {
+                               atomic_inc(&vp->vref_count);
+                               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                                qla2x00_vp_abort_isp(vp);
+
+                               spin_lock_irqsave(&ha->vport_slock, flags);
+                               atomic_dec(&vp->vref_count);
+                       }
                }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        } else {
                qla_printk(KERN_INFO, ha,
                        "qla82xx_restart_isp: **** FAILED ****\n");
index 6982ba70e12af12235d8d6502c78f81c06e732aa..28f65be19dad9878f19981a18ebc60ef13bed356 100644 (file)
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                cp->result = DID_ERROR << 16;
                                break;
                        }
-               } else if (!lscsi_status) {
+               } else {
                        DEBUG2(qla_printk(KERN_INFO, ha,
                            "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
                            "of 0x%x bytes).\n", vha->host_no, cp->device->id,
                            cp->device->lun, resid, scsi_bufflen(cp)));
 
-                       cp->result = DID_ERROR << 16;
-                       break;
+                       cp->result = DID_ERROR << 16 | lscsi_status;
+                       goto check_scsi_status;
                }
 
                cp->result = DID_OK << 16 | lscsi_status;
                logit = 0;
 
+check_scsi_status:
                /*
                 * Check to see if SCSI Status is non zero. If so report SCSI
                 * Status.
index 6009b0c69488144bf21715a2a8b03192fbac56ef..a595ec8264f8d7f73823a11d0616591a7957bd63 100644 (file)
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       scsi_qla_host_t *tvp;
+       unsigned long   flags;
 
        if (rptid_entry->entry_status != 0)
                return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        return;
                }
 
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_for_each_entry(vp, &ha->vp_list, list)
                        if (vp_idx == vp->vp_idx)
                                break;
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                if (!vp)
                        return;
 
index 987c5b0ca78ea22d67d074ee7d170ef1f14b52a4..2b69392a71a1fac76b50ab389a0c3e1054dce59f 100644 (file)
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 {
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
        /* Find an empty slot and assign an vp_id */
        mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        mutex_unlock(&ha->vport_lock);
        return vp_id;
 }
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 {
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
 
        mutex_lock(&ha->vport_lock);
+       /*
+        * Wait for all pending activities to finish before removing vport from
+        * the list.
+        * Lock needs to be held for safe removal from the list (it
+        * ensures no active vp_list traversal while the vport is removed
+        * from the queue)
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       while (atomic_read(&vha->vref_count)) {
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+               msleep(500);
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
+       }
+       list_del(&vha->list);
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);
-       list_del(&vha->list);
+
        mutex_unlock(&ha->vport_lock);
 }
 
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 {
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
-               if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+               if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
 }
 
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
 static void
 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 {
+       /*
+        * !!! NOTE !!!
+        * This function, if called in contexts other than vp create, disable
+        * or delete, please make sure this is synchronized with the
+        * delete thread.
+        */
        fc_port_t *fcport;
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
                    "loop_id=0x%04x :%x\n",
                    vha->host_no, fcport->loop_id, fcport->vp_idx));
 
-               atomic_set(&fcport->state, FCS_DEVICE_DEAD);
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                atomic_set(&fcport->state, FCS_UNCONFIGURED);
        }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-       scsi_qla_host_t *vha, *tvha;
+       scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
+       unsigned long flags;
 
-       list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
+                       atomic_inc(&vha->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vha->vref_count);
                }
                i++;
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
-       struct scsi_qla_host *tvp;
+       unsigned long flags = 0;
 
        if (vha->vp_idx)
                return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
        if (!(ha->current_topology & ISP_CFG_F))
                return;
 
-       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-               if (vp->vp_idx)
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               if (vp->vp_idx) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        ret = qla2x00_do_dpc_vp(vp);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
 int
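
All of the converted vp_list walks follow one pattern: take vport_slock just long enough to step the list, pin the current vport by bumping vref_count, drop the lock around work that can sleep or take other locks, then re-take the lock, drop the reference and continue. Deletion (qla24xx_deallocate_vp_id() above) waits for vref_count to reach zero before list_del(), so a pinned entry cannot vanish under the walker. A compilable userspace analogue, with a pthread mutex standing in for the spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct vport {
            atomic_int    vref_count;
            struct vport *next;
    };

    static pthread_mutex_t vport_slock = PTHREAD_MUTEX_INITIALIZER;
    static struct vport   *vp_list;

    static void blocking_work(struct vport *vp)
    {
            printf("working on %p\n", (void *)vp);  /* may sleep in real life */
    }

    static void for_each_vport(void)
    {
            pthread_mutex_lock(&vport_slock);
            for (struct vport *vp = vp_list; vp; vp = vp->next) {
                    atomic_fetch_add(&vp->vref_count, 1);   /* pin the entry  */
                    pthread_mutex_unlock(&vport_slock);

                    blocking_work(vp);                      /* lock not held  */

                    pthread_mutex_lock(&vport_slock);       /* re-take to step */
                    atomic_fetch_sub(&vp->vref_count, 1);
            }
            pthread_mutex_unlock(&vport_slock);
    }

    int main(void)
    {
            struct vport a = { .next = NULL };
            struct vport b = { .next = &a };

            vp_list = &b;
            for_each_vport();
            return 0;
    }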
index 915b77a6e19390026134ff697cceaa89e9dd8c99..0a71cc71eab23922e9c6aee451962f60a1e3cfb3 100644 (file)
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
 sufficient_dsds:
                req_cnt = 1;
 
+               if (req->cnt < (req_cnt + 2)) {
+                       cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+                               &reg->req_q_out[0]);
+                       if (req->ring_index < cnt)
+                               req->cnt = cnt - req->ring_index;
+                       else
+                               req->cnt = req->length -
+                                       (req->ring_index - cnt);
+               }
+
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+
                ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
                if (!sp->ctx) {
                        DEBUG(printk(KERN_INFO
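
The added check recomputes the free space in the circular request ring before allocating a command context: read the controller's out pointer, and if the producer index (ring_index) is below it the gap is cnt - ring_index, otherwise it wraps and the free space is length - (ring_index - cnt); if fewer than req_cnt + 2 entries remain, the request is failed instead of overrunning the ring. The same arithmetic in isolation:

    #include <stdio.h>

    static int ring_free(int length, int ring_index, int out)
    {
            if (ring_index < out)
                    return out - ring_index;
            return length - (ring_index - out);
    }

    int main(void)
    {
            /* e.g. a 128-entry ring */
            printf("in=120 out=10  -> free=%d\n", ring_free(128, 120, 10)); /* 18 */
            printf("in=5   out=100 -> free=%d\n", ring_free(128, 5, 100));  /* 95 */
            return 0;
    }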
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        }
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Due to fw hung, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Due to fw hung, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                }
-       }
+       } else
+               vha->seconds_since_last_heartbeat = 0;
        vha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
 
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                "%s(): Adapter reset needed!\n", __func__);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
+                       ha->flags.fw_hung = 1;
                        if (ha->flags.mbox_busy) {
-                               ha->flags.fw_hung = 1;
                                ha->flags.mbox_int = 1;
                                DEBUG2(qla_printk(KERN_ERR, ha,
-                                   "Need reset, doing premature "
-                                   "completion of mbx command\n"));
-                               complete(&ha->mbx_intr_comp);
+                                       "Need reset, doing premature "
+                                       "completion of mbx command\n"));
+                               if (test_bit(MBX_INTR_WAIT,
+                                       &ha->mbx_cmd_flags))
+                                       complete(&ha->mbx_intr_comp);
                        }
                } else {
                        qla82xx_check_fw_alive(vha);
index 8c80b49ac1c44d875f7c033fbb1c8962723307cb..1e4bff695254b4fb6a4adbc1008ba4a999352088 100644 (file)
@@ -2341,16 +2341,28 @@ probe_out:
 static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
-       scsi_qla_host_t *base_vha, *vha, *temp;
+       scsi_qla_host_t *base_vha, *vha;
        struct qla_hw_data  *ha;
+       unsigned long flags;
 
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
 
-       list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
-               if (vha && vha->fc_vport)
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vha, &ha->vp_list, list) {
+               atomic_inc(&vha->vref_count);
+
+               if (vha && vha->fc_vport) {
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        fc_vport_terminate(vha->fc_vport);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+               }
+
+               atomic_dec(&vha->vref_count);
        }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        set_bit(UNLOADING, &base_vha->dpc_flags);
 
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 {
        struct qla_work_evt *e;
+       uint8_t bail;
+
+       QLA_VHA_MARK_BUSY(vha, bail);
+       if (bail)
+               return NULL;
 
        e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
-       if (!e)
+       if (!e) {
+               QLA_VHA_MARK_NOT_BUSY(vha);
                return NULL;
+       }
 
        INIT_LIST_HEAD(&e->list);
        e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                }
                if (e->flags & QLA_EVT_FLAG_FREE)
                        kfree(e);
+
+               /* For each work completed decrement vha ref count */
+               QLA_VHA_MARK_NOT_BUSY(vha);
        }
 }
 
index e75ccb91317dcd27b030efb04f59a3f5ea4fcf16..8edbccb3232d3b68cb16d0054a444e657a452975 100644 (file)
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.03-k0"
+#define QLA2XXX_VERSION      "8.03.04-k0"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
-#define QLA_DRIVER_PATCH_VER   3
+#define QLA_DRIVER_PATCH_VER   4
 #define QLA_DRIVER_BETA_VER    0
index 9ade720422c685f01ab8f7fe3b625bfc054390d7..ee02d3838a0a41e09fbfff0096483ad3a24eb968 100644 (file)
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 err_exit:
        scsi_release_buffers(cmd);
-       scsi_put_command(cmd);
        cmd->request->special = NULL;
+       scsi_put_command(cmd);
        return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
index 2714becc2eaf72fc4cb4452232b3fc67ab320586..ffa0689ee84050c13b8d347b9429e5bf64a88ee6 100644 (file)
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
 
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
 
-       if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
+       if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
                if (scsi_block_when_processing_errors(sdev))
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
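
The old test allowed medium removal whenever atomic_dec_return() was non-zero, i.e. while other openers still held the disk, and skipped it on the final close. Comparing against zero makes the unlock happen exactly once, when the last opener goes away:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_int openers = 3;

            for (int i = 0; i < 3; i++) {
                    int left = atomic_fetch_sub(&openers, 1) - 1;  /* dec_return */
                    printf("release #%d: %d left%s\n", i + 1, left,
                           left == 0 ? "  -> allow medium removal" : "");
            }
            return 0;
    }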
@@ -2625,15 +2625,15 @@ module_exit(exit_sd);
 static void sd_print_sense_hdr(struct scsi_disk *sdkp,
                               struct scsi_sense_hdr *sshdr)
 {
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_sense_hdr(sshdr);
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
 }
 
 static void sd_print_result(struct scsi_disk *sdkp, int result)
 {
-       sd_printk(KERN_INFO, sdkp, "");
+       sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_result(result);
 }
 
index a7bc8b7b09ac3f26f7a17f0a992d9806b1e4ce40..2c3e89ddf069fb66fad54c46912216c97daa1ff8 100644 (file)
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
 
 static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
 {
-       if (label)
-               sym_print_addr(cp->cmd, "%s: ", label);
-       else
-               sym_print_addr(cp->cmd, "");
+       sym_print_addr(cp->cmd, "%s: ", label);
 
        spi_print_msg(msg);
        printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
                        switch (np->msgin [2]) {
                        case M_X_MODIFY_DP:
                                if (DEBUG_FLAGS & DEBUG_POINTER)
-                                       sym_print_msg(cp, NULL, np->msgin);
+                                       sym_print_msg(cp, "extended msg ",
+                                                     np->msgin);
                                tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 
                                      (np->msgin[5]<<8)  + (np->msgin[6]);
                                sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
                 */
                case M_IGN_RESIDUE:
                        if (DEBUG_FLAGS & DEBUG_POINTER)
-                               sym_print_msg(cp, NULL, np->msgin);
+                               sym_print_msg(cp, "1 or 2 byte ", np->msgin);
                        if (cp->host_flags & HF_SENSE)
                                OUTL_DSP(np, SCRIPTA_BA(np, clrack));
                        else
index 50441ffe8e3856592dbfdae368e9891b64078d50..2904aa044126dbc49729caacbcb3d236da900bc6 100644 (file)
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
        spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
 {
-       int line = port->line;
-
-       if (line >= port->state->port.tty->driver->num)
-               return;
-
-       if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+       if (new == N_PPS) {
                port->flags |= UPF_HARDPPS_CD;
                pl010_enable_ms(port);
        } else
index bc9af503907f4b24ac0845068701b2de1474a105..5dff45c76d32c5f026dc1ae408166a4f5e9ac6d2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
+#include <linux/slab.h>
 #include <linux/serial_reg.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void)
        }
 
        phsu = hsu;
-
        hsu_debugfs_init(hsu);
        return;
 
@@ -1435,18 +1435,20 @@ err_free_region:
 
 static void serial_hsu_remove(struct pci_dev *pdev)
 {
-       struct hsu_port *hsu;
-       int i;
+       void *priv = pci_get_drvdata(pdev);
+       struct uart_hsu_port *up;
 
-       hsu = pci_get_drvdata(pdev);
-       if (!hsu)
+       if (!priv)
                return;
 
-       for (i = 0; i < 3; i++)
-               uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
+       /* For port 0/1/2, priv is the address of uart_hsu_port */
+       if (pdev->device != 0x081E) {
+               up = priv;
+               uart_remove_one_port(&serial_hsu_reg, &up->port);
+       }
 
        pci_set_drvdata(pdev, NULL);
-       free_irq(hsu->irq, hsu);
+       free_irq(pdev->irq, priv);
        pci_disable_device(pdev);
 }
 
index 8dedb266f143f1cf801d84f34892a999b2dc9934..c4399e23565aca0c986fdecc56fc424dee2c1ee3 100644 (file)
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
        psc_fifoc = of_iomap(np, 0);
        if (!psc_fifoc) {
                pr_err("%s: Can't map FIFOC\n", __func__);
+               of_node_put(np);
                return -ENODEV;
        }
 
index f6ad1ecbff79ec9688fbc024a2036f351efff26e..51c15f58e01ef84a8ce43ba8b62472f0e4d7bec0 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/module.h>
 #include <linux/ioport.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
index 141c69554bd481d27863b852bbd79b364417643e..7d475b2a79e8bb7fee0cd8f1f048fc61d1b7db3c 100644 (file)
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
        info->p_dev = link;
        link->priv = info;
 
-       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-       link->resource[0]->end = 8;
        link->conf.Attributes = CONF_ENABLE_IRQ;
        if (do_sound) {
                link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
 
 /*====================================================================*/
 
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+       unsigned int port = 0;
+       struct serial_info *info = p_dev->priv;
+
+       if ((p_dev->resource[1]->end != 0) &&
+               (resource_size(p_dev->resource[1]) == 8)) {
+               port = p_dev->resource[1]->start;
+               info->slave = 1;
+       } else if ((info->manfid == MANFID_OSITECH) &&
+               (resource_size(p_dev->resource[0]) == 0x40)) {
+               port = p_dev->resource[0]->start + 0x28;
+               info->slave = 1;
+       }
+       if (info->slave)
+               return setup_serial(p_dev, info, port, p_dev->irq);
+
+       dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+       return -ENODEV;
+}
+
 static int simple_config_check(struct pcmcia_device *p_dev,
                               cistpl_cftable_entry_t *cf,
                               cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
        struct serial_info *info = link->priv;
        int i = -ENODEV, try;
 
-       /* If the card is already configured, look up the port and irq */
-       if (link->function_config) {
-               unsigned int port = 0;
-               if ((link->resource[1]->end != 0) &&
-                       (resource_size(link->resource[1]) == 8)) {
-                       port = link->resource[1]->end;
-                       info->slave = 1;
-               } else if ((info->manfid == MANFID_OSITECH) &&
-                       (resource_size(link->resource[0]) == 0x40)) {
-                       port = link->resource[0]->start + 0x28;
-                       info->slave = 1;
-               }
-               if (info->slave) {
-                       return setup_serial(link, info, port,
-                                           link->irq);
-               }
-       }
+       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+       link->resource[0]->end = 8;
 
        /* First pass: look for a config entry that looks normal.
         * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
        if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
                goto found_port;
 
-       printk(KERN_NOTICE
-              "serial_cs: no usable port range found, giving up\n");
+       dev_warn(&link->dev, "no usable port range found, giving up\n");
        return -1;
 
 found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
        int i, base2 = 0;
 
        /* First, look for a generic full-sized window */
+       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
        link->resource[0]->end = info->multi * 8;
        if (pcmcia_loop_config(link, multi_config_check, &base2)) {
                /* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
                info->multi = 2;
                if (pcmcia_loop_config(link, multi_config_check_notpicky,
                                       &base2)) {
-                       printk(KERN_NOTICE "serial_cs: no usable port range"
+                       dev_warn(&link->dev, "no usable port range "
                               "found, giving up\n");
                        return -ENODEV;
                }
        }
 
        if (!link->irq)
-               dev_warn(&link->dev,
-                       "serial_cs: no usable IRQ found, continuing...\n");
+               dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
 
        /*
         * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
           multifunction cards that ask for appropriate IO port ranges */
        if ((info->multi == 0) &&
            (link->has_func_id) &&
+           (link->socket->pcmcia_pfc == 0) &&
            ((link->func_id == CISTPL_FUNCID_MULTI) ||
             (link->func_id == CISTPL_FUNCID_SERIAL)))
                pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
        if (info->quirk && info->quirk->multi != -1)
                info->multi = info->quirk->multi;
 
-       if (info->multi > 1)
+       dev_info(&link->dev,
+               "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+               link->manf_id, link->card_id,
+               link->socket->pcmcia_pfc, info->multi, info->quirk);
+       if (link->socket->pcmcia_pfc)
+               i = pfc_config(link);
+       else if (info->multi > 1)
                i = multi_config(link);
        else
                i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
        return 0;
 
 failed:
-       dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+       dev_warn(&link->dev, "failed to initialize\n");
        serial_remove(link);
        return -ENODEV;
 }
index acd35d1ebd12621e35a05324daa450992b081f17..4c37c4e28647e68f62e1eaadcc92ac887857bf44 100644 (file)
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
        msg->state = NULL;
        if (msg->complete)
                msg->complete(msg->context);
-       /* This message is completed, so let's turn off the clock! */
+       /* This message is completed, so let's turn off the clocks! */
        clk_disable(pl022->clk);
+       amba_pclk_disable(pl022->adev);
 }
 
 /**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
        /* Setup the SPI using the per chip configuration */
        pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
        /*
-        * We enable the clock here, then the clock will be disabled when
+        * We enable the clocks here, then the clocks will be disabled when
         * giveback() is called in each method (poll/interrupt/DMA)
         */
+       amba_pclk_enable(pl022->adev);
        clk_enable(pl022->clk);
        restore_state(pl022);
        flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        /* Disable SSP */
-       clk_enable(pl022->clk);
        writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
               SSP_CR1(pl022->virtbase));
        load_ssp_default_config(pl022);
-       clk_disable(pl022->clk);
 
        status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
                             pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
                goto err_spi_register;
        }
        dev_dbg(dev, "probe succeded\n");
+       /* Disable the silicon block pclk and clock it when needed */
+       amba_pclk_disable(adev);
        return 0;
 
  err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
                return status;
        }
 
-       clk_enable(pl022->clk);
+       amba_pclk_enable(adev);
        load_ssp_default_config(pl022);
-       clk_disable(pl022->clk);
+       amba_pclk_disable(adev);
        dev_dbg(&adev->dev, "suspended\n");
        return 0;
 }
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
        return amba_driver_register(&pl022_driver);
 }
 
-module_init(pl022_init);
+subsys_initcall(pl022_init);
 
 static void __exit pl022_exit(void)
 {
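
The pl022 hunks above gate both the AMBA bus clock (amba_pclk_*) and the SSP functional clock around each pumped message instead of leaving them running from probe time onward. A minimal sketch of that gating pattern, using hypothetical demo_* names rather than the driver's real symbols, with error handling elided:

#include <linux/clk.h>

struct demo_spi {
        struct clk *pclk;               /* bus/interface clock */
        struct clk *fclk;               /* functional (SSP) clock */
};

/* Enable the clocks only while a message is actually being processed... */
static void demo_message_start(struct demo_spi *ds)
{
        clk_enable(ds->pclk);           /* error handling elided */
        clk_enable(ds->fclk);
        /* ... program the controller and run the transfer ... */
}

/* ...and drop them again in the giveback path. */
static void demo_message_done(struct demo_spi *ds)
{
        clk_disable(ds->fclk);
        clk_disable(ds->pclk);
}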
index d256cb00604c55db5cc2ce25233b8d8e189c69be..56247853c298aac0de06eae049429ed5581727c4 100644 (file)
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
        wait_till_not_busy(dws);
 }
 
-static void null_cs_control(u32 command)
-{
-}
-
 static int null_writer(struct dw_spi *dws)
 {
        u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
                                        struct spi_transfer,
                                        transfer_list);
 
-       if (!last_transfer->cs_change)
+       if (!last_transfer->cs_change && dws->cs_control)
                dws->cs_control(MRST_SPI_DEASSERT);
 
        msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 {
        struct dw_spi *dws = dev_id;
+       u16 irq_status, irq_mask = 0x3f;
+
+       irq_status = dw_readw(dws, isr) & irq_mask;
+       if (!irq_status)
+               return IRQ_NONE;
 
        if (!dws->cur_msg) {
                spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
         */
        if (dws->cs_control) {
                if (dws->rx && dws->tx)
-                       chip->tmode = 0x00;
+                       chip->tmode = SPI_TMOD_TR;
                else if (dws->rx)
-                       chip->tmode = 0x02;
+                       chip->tmode = SPI_TMOD_RO;
                else
-                       chip->tmode = 0x01;
+                       chip->tmode = SPI_TMOD_TO;
 
-               cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+               cr0 &= ~SPI_TMOD_MASK;
                cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
        }
 
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
-
-               chip->cs_control = null_cs_control;
-               chip->enable_dma = 0;
        }
 
        /*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
        dws->dma_inited = 0;
        dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
 
-       ret = request_irq(dws->irq, dw_spi_irq, 0,
+       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
                        "dw_spi", dws);
        if (ret < 0) {
                dev_err(&master->dev, "can not get IRQ\n");
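
The dw_spi change above requests the interrupt with IRQF_SHARED, so the handler must first check its own status bits and return IRQ_NONE when another device on the line raised the interrupt. A minimal sketch of that shared-IRQ pattern, assuming a hypothetical demo_dev with a mapped status register:

#include <linux/interrupt.h>
#include <linux/io.h>

struct demo_dev {
        void __iomem *isr;              /* assumed: mapped interrupt status reg */
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        struct demo_dev *dd = dev_id;
        u16 status = readw(dd->isr) & 0x3f;     /* low bits: pending causes */

        if (!status)
                return IRQ_NONE;        /* another device on the shared line */

        /* ... acknowledge and service the interrupt ... */
        return IRQ_HANDLED;
}

static int demo_claim_irq(struct demo_dev *dd, unsigned int irq)
{
        return request_irq(irq, demo_irq, IRQF_SHARED, "demo_spi", dd);
}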
index a9e5c79ae52a04a43aebabeacfc2d3cc29e03e77..b5a78a1f4421a0c19aa8735bd49f076fd8f91b46 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 #include <linux/mutex.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);
 
+       /* Attempt an OF style match */
+       if (of_driver_match_device(dev, drv))
+               return 1;
+
        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);
 
@@ -554,11 +559,9 @@ done:
 EXPORT_SYMBOL_GPL(spi_register_master);
 
 
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
 {
-       /* note: before about 2.6.14-rc1 this would corrupt memory: */
-       if (dev != master_dev)
-               spi_unregister_device(to_spi_device(dev));
+       spi_unregister_device(to_spi_device(dev));
        return 0;
 }
 
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
 {
        int dummy;
 
-       dummy = device_for_each_child(master->dev.parent, &master->dev,
-                                       __unregister);
+       dummy = device_for_each_child(&master->dev, NULL, __unregister);
        device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
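
The spi core hunks above try an OF (device tree) match before the id-table fallback and unregister the master's own child devices directly. A minimal sketch of the two-stage bus match, with the legacy fallback stubbed out and demo_* names assumed:

#include <linux/device.h>
#include <linux/of_device.h>

/* Hypothetical bus match: a device-tree compatible match wins, otherwise
 * fall back to whatever legacy id/name matching the bus already had. */
static int demo_match_device(struct device *dev, struct device_driver *drv)
{
        if (of_driver_match_device(dev, drv))
                return 1;

        /* legacy id-table / name matching would go here */
        return 0;
}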
index e24a63498acb84f7e4f9f83710aca3fadefcc177..63e51b011d508c8922bb047cfb94572049323493 100644 (file)
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
        spi_gpio->bitbang.master = spi_master_get(master);
        spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
 
-       if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) {
+       if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
                spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
                spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
                spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
index d31b57f7baaf3c3bf970294afd13a1396cecf46c..1dd86b835cd86a800aa8db8257e4a37422a33b8a 100644 (file)
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 
        xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
 
-       out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+       if (mspi->rx_dma == mspi->dma_dummy_rx)
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+       else
+               out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
        out_be16(&rx_bd->cbd_datlen, 0);
        out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
 
-       out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+       if (mspi->tx_dma == mspi->dma_dummy_tx)
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+       else
+               out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
        out_be16(&tx_bd->cbd_datlen, xfer_len);
        out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
                                 BD_SC_LAST);
index 97365815a729a2d3c17d47d3815ffa38ff6c6526..c3038da2648aa4e621b0e6127b89c3017724cb55 100644 (file)
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
                val = readl(regs + S3C64XX_SPI_STATUS);
        } while (TX_FIFO_LVL(val, sci) && loops--);
 
+       if (loops == 0)
+               dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
        /* Flush RxFIFO*/
        loops = msecs_to_loops(1);
        do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
                        break;
        } while (loops--);
 
+       if (loops == 0)
+               dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
        val = readl(regs + S3C64XX_SPI_CH_CFG);
        val &= ~S3C64XX_SPI_CH_SW_RST;
        writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
 
        /* millisecs to xfer 'len' bytes @ 'cur_speed' */
        ms = xfer->len * 8 * 1000 / sdd->cur_speed;
-       ms += 5; /* some tolerance */
+       ms += 10; /* some tolerance */
 
        if (dma_mode) {
                val = msecs_to_jiffies(ms) + 10;
                val = wait_for_completion_timeout(&sdd->xfer_completion, val);
        } else {
+               u32 status;
                val = msecs_to_loops(ms);
                do {
-                       val = readl(regs + S3C64XX_SPI_STATUS);
-               } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+                       status = readl(regs + S3C64XX_SPI_STATUS);
+               } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
        }
 
        if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
        writel(val, regs + S3C64XX_SPI_CLK_CFG);
 }
 
-void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                               int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+                                int size, enum s3c2410_dma_buffresult res)
 {
        struct s3c64xx_spi_driver_data *sdd = buf_id;
        unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
        spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                               int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+                                int size, enum s3c2410_dma_buffresult res)
 {
        struct s3c64xx_spi_driver_data *sdd = buf_id;
        unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 
                if (xfer->tx_buf != NULL) {
-                       xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
-                                               xfer->len, DMA_TO_DEVICE);
+                       xfer->tx_dma = dma_map_single(dev,
+                                       (void *)xfer->tx_buf, xfer->len,
+                                       DMA_TO_DEVICE);
                        if (dma_mapping_error(dev, xfer->tx_dma)) {
                                dev_err(dev, "dma_map_single Tx failed\n");
                                xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       sci = pdev->dev.platform_data;
+       if (!sci->src_clk_name) {
+               dev_err(&pdev->dev,
+                       "Board init must call s3c64xx_spi_set_info()\n");
+               return -EINVAL;
+       }
+
        /* Check for availability of necessary resource */
 
        dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       sci = pdev->dev.platform_data;
-
        platform_set_drvdata(pdev, master);
 
        sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
 {
        return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
 }
-module_init(s3c64xx_spi_init);
+subsys_initcall(s3c64xx_spi_init);
 
 static void __exit s3c64xx_spi_exit(void)
 {
index baa8b05b9e8d70a49ba2dcb63e5e8c3cea7ed341..6e973a79aa25b6fec509639687fc4e196a568397 100644 (file)
@@ -30,7 +30,6 @@
 #include "hash.h"
 
 #include <linux/if_arp.h>
-#include <linux/netfilter_bridge.h>
 
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 
@@ -431,11 +430,6 @@ out:
        return NOTIFY_DONE;
 }
 
-static int batman_skb_recv_finish(struct sk_buff *skb)
-{
-       return NF_ACCEPT;
-}
-
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
 int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        if (atomic_read(&module_state) != MODULE_ACTIVE)
                goto err_free;
 
-       /* if netfilter/ebtables wants to block incoming batman
-        * packets then give them a chance to do so here */
-       ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
-                     batman_skb_recv_finish);
-       if (ret != 1)
-               goto err_out;
-
        /* packet should hold at least type and version */
        if (unlikely(skb_headlen(skb) < 2))
                goto err_free;
index 055edee7b4e401be0f9142d222fab9a52631c150..da3c82e47bbd6365788b827f9a1ee2e4a48be226 100644 (file)
@@ -29,7 +29,6 @@
 #include "vis.h"
 #include "aggregation.h"
 
-#include <linux/netfilter_bridge.h>
 
 static void send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
 
        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
-        * (which is > 0). This will not be treated as an error.
-        * Also, if netfilter/ebtables wants to block outgoing batman
-        * packets then giving them a chance to do so here */
+        * (which is > 0). This will not be treated as an error. */
 
-       return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-                      dev_queue_xmit);
+       return dev_queue_xmit(skb);
 send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
index 9952579425b99a477c29e488243337e9372b2200..1b3060eb2921c9da66fb2f0844de4a2d1afa4ce0 100644 (file)
@@ -80,5 +80,4 @@ struct st_proto_s {
 extern long st_register(struct st_proto_s *);
 extern long st_unregister(enum proto_type);
 
-extern struct platform_device *st_get_plat_device(void);
 #endif /* ST_H */
index 063c9b1db1ab655504f5908e7c4598418dafd0fd..b85d8bfdf600ad22cd4d62636b521bfb3ac38fa5 100644 (file)
@@ -38,7 +38,6 @@
 #include "st_ll.h"
 #include "st.h"
 
-#define VERBOSE
 /* strings to be used for rfkill entries and by
  * ST Core to be used for sysfs debug entry
  */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
        long err = 0;
        unsigned long flags = 0;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        pr_info("%s(%d) ", __func__, new_proto->type);
        if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
            || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
 
        pr_debug("%s: %d ", __func__, type);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (type < ST_BT || type >= ST_MAX) {
                pr_err(" protocol %d not supported", type);
                return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
 #endif
        long len;
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        if (unlikely(skb == NULL || st_gdata == NULL
                || st_gdata->tty == NULL)) {
                pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
        struct st_data_s *st_gdata;
        pr_info("%s ", __func__);
 
-       st_kim_ref(&st_gdata);
+       st_kim_ref(&st_gdata, 0);
        st_gdata->tty = tty;
        tty->disc_data = st_gdata;
 
index e0c32d149f5f294878d22597bec9c1bfc05660a4..8601320a679ee8ce5dda5ea9d36f8c85798dd23e 100644 (file)
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
 void st_core_exit(struct st_data_s *);
 
 /* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
 
 #define GPS_STUB_TEST
 #ifdef GPS_STUB_TEST
index b4a6c7fdc4e6ca59283dd3e64736c0efdfd97407..9e99463f76e8a0d048f7ec8667a455125d88cb0b 100644 (file)
@@ -72,10 +72,25 @@ const unsigned char *protocol_names[] = {
        PROTO_ENTRY(ST_GPS, "GPS"),
 };
 
+#define MAX_ST_DEVICES 3       /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
 
 /**********************************************************************/
 /* internal functions */
 
+/**
+ * st_get_plat_device -
+ *     function which returns the reference to the platform device
+ *     requested by id. As of now only 1 such device exists (id=0)
+ *     the context requesting for reference can get the id to be
+ *     requested by a. The protocol driver which is registering or
+ *     b. the tty device which is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+       return st_kim_devices[id];
+}
+
 /**
  * validate_firmware_response -
  *     function to return whether the firmware response was proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
        struct kim_data_s       *kim_gdata;
        pr_info(" %s ", __func__);
 
-       kim_pdev = st_get_plat_device();
+       kim_pdev = st_get_plat_device(0);
        kim_gdata = dev_get_drvdata(&kim_pdev->dev);
 
        if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
  *     This would enable multiple such platform devices to exist
  *     on a given platform
  */
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
 {
        struct platform_device  *pdev;
        struct kim_data_s       *kim_gdata;
        /* get kim_gdata reference from platform device */
-       pdev = st_get_plat_device();
+       pdev = st_get_plat_device(id);
        kim_gdata = dev_get_drvdata(&pdev->dev);
        *core_data = kim_gdata->core_data;
 }
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
        long *gpios = pdev->dev.platform_data;
        struct kim_data_s       *kim_gdata;
 
+       st_kim_devices[pdev->id] = pdev;
        kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
        if (!kim_gdata) {
                pr_err("no mem to allocate");
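
The kernel-doc above describes looking up one of several ST platform devices by id; kim_probe() records each device in an array indexed by pdev->id so st_get_plat_device(id) can return it later. A minimal sketch of that id-indexed registry, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/platform_device.h>

#define DEMO_MAX_DEVICES 3              /* assumed: one per UART */

static struct platform_device *demo_devices[DEMO_MAX_DEVICES];

/* Probe records the device so later callers can look it up by id. */
static int demo_probe(struct platform_device *pdev)
{
        if (pdev->id < 0 || pdev->id >= DEMO_MAX_DEVICES)
                return -EINVAL;
        demo_devices[pdev->id] = pdev;
        return 0;
}

static struct platform_device *demo_get_device(int id)
{
        if (id < 0 || id >= DEMO_MAX_DEVICES)
                return NULL;
        return demo_devices[id];
}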
index 0142338bcafe7fc0440edbf3c65a12d7f9b0df85..4bdb8362de827cdb5c820e2bcb3ca65f9a9226e7 100644 (file)
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
 
 
-       if (param->u.wpa_associate.wpa_ie &&
-           copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
-           return -EINVAL;
+       if (param->u.wpa_associate.wpa_ie_len) {
+               if (!param->u.wpa_associate.wpa_ie)
+                       return -EINVAL;
+               if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+                       return -EINVAL;
+               if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+                       return -EFAULT;
+       }
 
        if (param->u.wpa_associate.mode == 1)
            pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
index 7e594449600e004c7f6ba14fa2dce39ebee25d20..9eed5b52d9de22647f6ca7e94791cd97f6e60122 100644 (file)
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
          If you are unsure about this, say N here.
 
 config USB_SUSPEND
-       bool "USB runtime power management (suspend/resume and wakeup)"
+       bool "USB runtime power management (autosuspend) and wakeup"
        depends on USB && PM_RUNTIME
        help
          If you say Y here, you can use driver calls or the sysfs
-         "power/level" file to suspend or resume individual USB
-         peripherals and to enable or disable autosuspend (see
+         "power/control" file to enable or disable autosuspend for
+         individual USB peripherals (see
          Documentation/usb/power-management.txt for more details).
 
          Also, USB "remote wakeup" signaling is supported, whereby some
index f06f5dbc8cdc22fbedfa463c05b18672cbb99516..1e6ccef2cf0cbd9e817e7045dccfb03918efa9d7 100644 (file)
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
 int usb_register_dev(struct usb_interface *intf,
                     struct usb_class_driver *class_driver)
 {
-       int retval = -EINVAL;
+       int retval;
        int minor_base = class_driver->minor_base;
-       int minor = 0;
+       int minor;
        char name[20];
        char *temp;
 
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
         */
        minor_base = 0;
 #endif
-       intf->minor = -1;
-
-       dbg ("looking for a minor, starting at %d", minor_base);
 
        if (class_driver->fops == NULL)
-               goto exit;
+               return -EINVAL;
+       if (intf->minor >= 0)
+               return -EADDRINUSE;
+
+       retval = init_usb_class();
+       if (retval)
+               return retval;
+
+       dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
 
        down_write(&minor_rwsem);
        for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
                        continue;
 
                usb_minors[minor] = class_driver->fops;
-
-               retval = 0;
+               intf->minor = minor;
                break;
        }
        up_write(&minor_rwsem);
-
-       if (retval)
-               goto exit;
-
-       retval = init_usb_class();
-       if (retval)
-               goto exit;
-
-       intf->minor = minor;
+       if (intf->minor < 0)
+               return -EXFULL;
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
-               usb_minors[intf->minor] = NULL;
+               usb_minors[minor] = NULL;
+               intf->minor = -1;
                up_write(&minor_rwsem);
                retval = PTR_ERR(intf->usb_dev);
        }
-exit:
        return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
index 844683e503830485147910ff16ca035a90194d27..9f0ce7de0e366fb0066dfb92d6a6403aa5f4a2b3 100644 (file)
@@ -1802,6 +1802,7 @@ free_interfaces:
                intf->dev.groups = usb_interface_groups;
                intf->dev.dma_mask = dev->dev.dma_mask;
                INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+               intf->minor = -1;
                device_initialize(&intf->dev);
                dev_set_name(&intf->dev, "%d-%s:%d.%d",
                        dev->bus->busnum, dev->devpath,
index 58b72d741d9313b1f393a033cd0ed4cb89ad32ac..a1e8d273103f77b2d237a5f68438b289b51bb92a 100644 (file)
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                        ehci->broken_periodic = 1;
                        ehci_info(ehci, "using broken periodic workaround\n");
                }
+               if (pdev->device == 0x0806 || pdev->device == 0x0811
+                               || pdev->device == 0x0829) {
+                       ehci_info(ehci, "disable lpm for langwell/penwell\n");
+                       ehci->has_lpm = 0;
+               }
                break;
        case PCI_VENDOR_ID_TDI:
                if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
index 59dc3d351b60269f3c3872b859c0b39d7fcbe449..5ab5bb89bae3558cfe6f296e37931680afa0e323 100644 (file)
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
                                index, transmit ? 'T' : 'R', cppi_ch);
        cppi_ch->hw_ep = ep;
        cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+       cppi_ch->channel.max_len = 0x7fffffff;
 
        DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
        return &cppi_ch->channel;
index c79a5e30d43735bb285300152ade427fd81dac6a..9e8639d4e862b65a2b23bc27a29fd09ba0b60ec1 100644 (file)
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
 
 static int musb_test_mode_open(struct inode *inode, struct file *file)
 {
-       file->private_data = inode->i_private;
-
        return single_open(file, musb_test_mode_show, inode->i_private);
 }
 
 static ssize_t musb_test_mode_write(struct file *file,
                const char __user *ubuf, size_t count, loff_t *ppos)
 {
-       struct musb             *musb = file->private_data;
+       struct seq_file         *s = file->private_data;
+       struct musb             *musb = s->private;
        u8                      test = 0;
        char                    buf[18];
 
index 6fca870e957ed3b5061f19deec76a9472d6d5211..d065e23f123ee755f9e6fe0e661823f48d917727 100644 (file)
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #ifndef        CONFIG_MUSB_PIO_ONLY
        if (is_dma_capable() && musb_ep->dma) {
                struct dma_controller   *c = musb->dma_controller;
+               size_t request_size;
+
+               /* setup DMA, then program endpoint CSR */
+               request_size = min_t(size_t, request->length - request->actual,
+                                       musb_ep->dma->max_len);
 
                use_dma = (request->dma != DMA_ADDR_INVALID);
 
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
 #ifdef CONFIG_USB_INVENTRA_DMA
                {
-                       size_t request_size;
-
-                       /* setup DMA, then program endpoint CSR */
-                       request_size = min_t(size_t, request->length,
-                                               musb_ep->dma->max_len);
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                0,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
                if (!use_dma) {
                        c->channel_release(musb_ep->dma);
                        musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                request->zero,
-                               request->dma,
-                               request->length);
+                               request->dma + request->actual,
+                               request_size);
 #endif
        }
 #endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                                request->zero = 0;
                        }
 
-                       /* ... or if not, then complete it. */
-                       musb_g_giveback(musb_ep, request, 0);
-
-                       /*
-                        * Kickstart next transfer if appropriate;
-                        * the packet that just completed might not
-                        * be transmitted for hours or days.
-                        * REVISIT for double buffering...
-                        * FIXME revisit for stalls too...
-                        */
-                       musb_ep_select(mbase, epnum);
-                       csr = musb_readw(epio, MUSB_TXCSR);
-                       if (csr & MUSB_TXCSR_FIFONOTEMPTY)
-                               return;
-
-                       request = musb_ep->desc ? next_request(musb_ep) : NULL;
-                       if (!request) {
-                               DBG(4, "%s idle now\n",
-                                       musb_ep->end_point.name);
-                               return;
+                       if (request->actual == request->length) {
+                               musb_g_giveback(musb_ep, request, 0);
+                               request = musb_ep->desc ? next_request(musb_ep) : NULL;
+                               if (!request) {
+                                       DBG(4, "%s idle now\n",
+                                               musb_ep->end_point.name);
+                                       return;
+                               }
                        }
                }
 
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 {
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                fifo_count = 0;
-       u16                     len = musb_ep->packet_sz;
+       u16                     len;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
+
+       len = musb_ep->packet_sz;
 
        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
         */
 
                                csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
                                csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
                                /* csr |= MUSB_RXCSR_DMAMODE; */
 
                                /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                                if (request->actual < request->length) {
                                        int transfer_size = 0;
 #ifdef USE_MODE1
-                                       transfer_size = min(request->length,
+                                       transfer_size = min(request->length - request->actual,
                                                        channel->max_len);
 #else
-                                       transfer_size = len;
+                                       transfer_size = min(request->length - request->actual,
+                                                       (unsigned)len);
 #endif
                                        if (transfer_size <= musb_ep->packet_sz)
                                                musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
        u16                     csr;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
-       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       if (hw_ep->is_shared_fifo)
+               musb_ep = &hw_ep->ep_in;
+       else
+               musb_ep = &hw_ep->ep_out;
 
        musb_ep_select(mbase, epnum);
 
@@ -1081,7 +1084,7 @@ struct free_record {
 /*
  * Context: controller locked, IRQs blocked.
  */
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
        DBG(3, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
index c8b140325d82bf4bb6cd3ad85216fa24ec308ce7..572b1da7f2dc45ea1fd3d9bb67d3ea273d56cfae 100644 (file)
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
 
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
 #endif         /* __MUSB_GADGET_H */
index 59bef8f3a3585100310bbb43848ea56603a82c28..6dd03f4c5f4956983c2d1bf2552c248f6a1702a0 100644 (file)
@@ -261,6 +261,7 @@ __acquires(musb->lock)
                                        ctrlrequest->wIndex & 0x0f;
                                struct musb_ep          *musb_ep;
                                struct musb_hw_ep       *ep;
+                               struct musb_request     *request;
                                void __iomem            *regs;
                                int                     is_in;
                                u16                     csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
                                        musb_writew(regs, MUSB_RXCSR, csr);
                                }
 
+                               /* Maybe start the first request in the queue */
+                               request = to_musb_request(
+                                               next_request(musb_ep));
+                               if (!musb_ep->busy && request) {
+                                       DBG(3, "restarting the request\n");
+                                       musb_ep_restart(musb, request);
+                               }
+
                                /* select ep0 again */
                                musb_ep_select(mbase, 0);
                                } break;
index 877d20b1dff973fd6975f258bb0b18267df1f5f5..9e65c47cc98b95761daba7186dd180550b954def 100644 (file)
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
 
        qh->segsize = length;
 
+       /*
+        * Ensure the data reaches to main memory before starting
+        * DMA transfer
+        */
+       wmb();
+
        if (!dma->channel_program(channel, pkt_size, mode,
                        urb->transfer_dma + offset, length)) {
                dma->channel_release(channel);
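
The comment added above spells out the ordering requirement: descriptor and buffer writes must be visible in main memory before the register write that starts the DMA engine. A minimal sketch of that barrier-then-doorbell pattern, with an assumed demo_chan layout:

#include <linux/io.h>

struct demo_chan {
        void __iomem *doorbell;         /* assumed: writing 1 starts the engine */
};

static void demo_kick_dma(struct demo_chan *ch, u32 *desc, u32 len)
{
        desc[0] = len;                  /* descriptor lives in main memory */

        /* Make sure the descriptor/buffer writes are not reordered past
         * the doorbell write that starts the transfer. */
        wmb();

        writel(1, ch->doorbell);
}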
index 05aaac1c3861e5be2f8d30c4311e7faa5a2c40a8..0bc97698af157d2cc25e4309a0da34ac2ad4e317 100644 (file)
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
        }
 }
 
-static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
 {
-       u8 pwr;
+       u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+       if (on)
+               pwr &= ~PHY_PWR_PHYPWD;
+       else
+               pwr |= PHY_PWR_PHYPWD;
 
-       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+       WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
+static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
        if (on) {
                regulator_enable(twl->usb3v1);
                regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
                twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
                                                        VUSB_DEDICATED2);
                regulator_enable(twl->usb1v5);
-               pwr &= ~PHY_PWR_PHYPWD;
-               WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+               __twl4030_phy_power(twl, 1);
                twl4030_usb_write(twl, PHY_CLK_CTRL,
                                  twl4030_usb_read(twl, PHY_CLK_CTRL) |
                                        (PHY_CLK_CTRL_CLOCKGATING_EN |
                                                PHY_CLK_CTRL_CLK32K_EN));
-       } else  {
-               pwr |= PHY_PWR_PHYPWD;
-               WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+       } else {
+               __twl4030_phy_power(twl, 0);
                regulator_disable(twl->usb1v5);
                regulator_disable(twl->usb1v8);
                regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
 
        twl4030_phy_power(twl, 0);
        twl->asleep = 1;
+       dev_dbg(twl->dev, "%s\n", __func__);
 }
 
-static void twl4030_phy_resume(struct twl4030_usb *twl)
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
 {
-       if (!twl->asleep)
-               return;
-
        twl4030_phy_power(twl, 1);
        twl4030_i2c_access(twl, 1);
        twl4030_usb_set_mode(twl, twl->usb_mode);
        if (twl->usb_mode == T2_USB_MODE_ULPI)
                twl4030_i2c_access(twl, 0);
+}
+
+static void twl4030_phy_resume(struct twl4030_usb *twl)
+{
+       if (!twl->asleep)
+               return;
+       __twl4030_phy_resume(twl);
        twl->asleep = 0;
+       dev_dbg(twl->dev, "%s\n", __func__);
 }
 
 static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
        twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
        twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
 
-       /* put VUSB3V1 LDO in active state */
-       twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+       /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
+       /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
 
        /* input to VUSB3V1 LDO is from VBAT, not VBUS */
        twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+       int status;
+
+       status = twl4030_usb_linkstat(twl);
+       if (status >= 0) {
+               if (status == USB_EVENT_NONE) {
+                       __twl4030_phy_power(twl, 0);
+                       twl->asleep = 1;
+               } else {
+                       __twl4030_phy_resume(twl);
+                       twl->asleep = 0;
+               }
+
+               blocking_notifier_call_chain(&twl->otg.notifier, status,
+                               twl->otg.gadget);
+       }
+       sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
 static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
 {
        struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        struct twl4030_usb_data *pdata = pdev->dev.platform_data;
        struct twl4030_usb      *twl;
        int                     status, err;
-       u8                      pwr;
 
        if (!pdata) {
                dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        twl->otg.set_peripheral = twl4030_set_peripheral;
        twl->otg.set_suspend    = twl4030_set_suspend;
        twl->usb_mode           = pdata->usb_mode;
-
-       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
-
-       twl->asleep             = (pwr & PHY_PWR_PHYPWD);
+       twl->asleep = 1;
 
        /* init spinlock for workqueue */
        spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
                return status;
        }
 
-       /* The IRQ handler just handles changes from the previous states
-        * of the ID and VBUS pins ... in probe() we must initialize that
-        * previous state.  The easy way:  fake an IRQ.
-        *
-        * REVISIT:  a real IRQ might have happened already, if PREEMPT is
-        * enabled.  Else the IRQ may not yet be configured or enabled,
-        * because of scheduling delays.
+       /* Power down phy or make it work according to
+        * current link state.
         */
-       twl4030_usb_irq(twl->irq, twl);
+       twl4030_usb_phy_init(twl);
 
        dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
        return 0;
index 30922a7e3347494b5ca30b81ea77e53ea767e91e..aa665817a2720414a7e669b20499d62ebca4b6db 100644 (file)
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
 
        case TIOCGICOUNT:
                cnow = mos7720_port->icount;
+
+               memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
index 1c9b6e9b2386e5032da1e3b8a095525222dc8cb5..1a42bc2137995bea0b70905cfe518674242141fd 100644 (file)
@@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
        case TIOCGICOUNT:
                cnow = mos7840_port->icount;
                smp_rmb();
+
+               memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
                icount.cts = cnow.cts;
                icount.dsr = cnow.dsr;
                icount.rng = cnow.rng;
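
Both TIOCGICOUNT hunks above zero the serial_icounter_struct before filling it, so uninitialized fields and padding are never copied back to user space. A minimal sketch of that zero-before-copy pattern, with a hypothetical demo_counters structure:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_counters {
        int cts, dsr, rng, dcd;
        /* ... further fields and implicit padding ... */
};

static int demo_fill_counters(struct demo_counters __user *uarg, int cts)
{
        struct demo_counters c;

        memset(&c, 0, sizeof(c));       /* nothing uninitialized leaks out */
        c.cts = cts;

        return copy_to_user(uarg, &c, sizeof(c)) ? -EFAULT : 0;
}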
index 84f842331dfae0d7c4b194b42f283fae44e8a6af..7ccc967831f05bca5aba179bb88ff7880f55b92f 100644 (file)
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
        softback_buf = 0UL;
 
        for (i = 0; i < FB_MAX; i++) {
-               int pending;
+               int pending = 0;
 
                mapped = 0;
                info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
                if (info == NULL)
                        continue;
 
-               pending = cancel_work_sync(&info->queue);
+               if (info->queue.func)
+                       pending = cancel_work_sync(&info->queue);
                DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
                        "no"));
 
index 815f84b07933f7b97dc1fab249055531f6cc65b2..70477c2e4b619cd6a2ca5562771e15243cd647a3 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <linux/dmi.h>
-
+#include <linux/pci.h>
 #include <video/vga.h>
 
 static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
        M_I20,          /* 20-Inch iMac */
        M_I20_SR,       /* 20-Inch iMac (Santa Rosa) */
        M_I24,          /* 24-Inch iMac */
+       M_I24_8_1,      /* 24-Inch iMac, 8,1th gen */
+       M_I24_10_1,     /* 24-Inch iMac, 10,1th gen */
+       M_I27_11_1,     /* 27-Inch iMac, 11,1th gen */
        M_MINI,         /* Mac Mini */
+       M_MINI_3_1,     /* Mac Mini, 3,1th gen */
+       M_MINI_4_1,     /* Mac Mini, 4,1th gen */
        M_MB,           /* MacBook */
        M_MB_2,         /* MacBook, 2nd rev. */
        M_MB_3,         /* MacBook, 3rd rev. */
+       M_MB_5_1,       /* MacBook, 5th rev. */
+       M_MB_6_1,       /* MacBook, 6th rev. */
+       M_MB_7_1,       /* MacBook, 7th rev. */
        M_MB_SR,        /* MacBook, 2nd gen, (Santa Rosa) */
        M_MBA,          /* MacBook Air */
        M_MBP,          /* MacBook Pro */
        M_MBP_2,        /* MacBook Pro 2nd gen */
+       M_MBP_2_2,      /* MacBook Pro 2,2nd gen */
        M_MBP_SR,       /* MacBook Pro (Santa Rosa) */
        M_MBP_4,        /* MacBook Pro, 4th gen */
        M_MBP_5_1,    /* MacBook Pro, 5,1th gen */
+       M_MBP_5_2,      /* MacBook Pro, 5,2th gen */
+       M_MBP_5_3,      /* MacBook Pro, 5,3rd gen */
+       M_MBP_6_1,      /* MacBook Pro, 6,1th gen */
+       M_MBP_6_2,      /* MacBook Pro, 6,2th gen */
+       M_MBP_7_1,      /* MacBook Pro, 7,1th gen */
        M_UNKNOWN       /* placeholder */
 };
 
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
        [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
        [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
        [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+       [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+       [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+       [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
        [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+       [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+       [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
        [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+       [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+       [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
        [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
        [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+       [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
        [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
        [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
        [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+       [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+       [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+       [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+       [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
        [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
 };
 
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
        /* At least one of these two will be right; maybe both? */
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
        EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
        EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+       EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
        {},
 };
 
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
 {
        struct efifb_dmi_info *info = id->driver_data;
        if (info->base == 0)
-               return -ENODEV;
+               return 0;
 
        printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
                         "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
                         info->stride);
 
        /* Trust the bootloader over the DMI tables */
-       if (screen_info.lfb_base == 0)
+       if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+               struct pci_dev *dev = NULL;
+               int found_bar = 0;
+#endif
                screen_info.lfb_base = info->base;
-       if (screen_info.lfb_linelength == 0)
-               screen_info.lfb_linelength = info->stride;
-       if (screen_info.lfb_width == 0)
-               screen_info.lfb_width = info->width;
-       if (screen_info.lfb_height == 0)
-               screen_info.lfb_height = info->height;
-       if (screen_info.orig_video_isVGA == 0)
-               screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
 
-       return 0;
+#if defined(CONFIG_PCI)
+               /* make sure that the address in the table is actually on a
+                * VGA device's PCI BAR */
+
+               for_each_pci_dev(dev) {
+                       int i;
+                       if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+                               continue;
+                       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                               resource_size_t start, end;
+
+                               start = pci_resource_start(dev, i);
+                               if (start == 0)
+                                       break;
+                               end = pci_resource_end(dev, i);
+                               if (screen_info.lfb_base >= start &&
+                                               screen_info.lfb_base < end) {
+                                       found_bar = 1;
+                               }
+                       }
+               }
+               if (!found_bar)
+                       screen_info.lfb_base = 0;
+#endif
+       }
+       if (screen_info.lfb_base) {
+               if (screen_info.lfb_linelength == 0)
+                       screen_info.lfb_linelength = info->stride;
+               if (screen_info.lfb_width == 0)
+                       screen_info.lfb_width = info->width;
+               if (screen_info.lfb_height == 0)
+                       screen_info.lfb_height = info->height;
+               if (screen_info.orig_video_isVGA == 0)
+                       screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+       } else {
+               screen_info.lfb_linelength = 0;
+               screen_info.lfb_width = 0;
+               screen_info.lfb_height = 0;
+               screen_info.orig_video_isVGA = 0;
+               return 0;
+       }
+       return 1;
 }
 
 static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
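
The efifb hunk above only trusts the DMI-supplied framebuffer address after confirming it lies inside one of a VGA-class PCI device's resource windows. A minimal standalone C sketch of that containment test follows; the struct and half-open range convention are stand-ins for illustration, not the kernel's struct pci_dev/resource API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in BAR table: half-open [start, end) ranges, a zero start marks
 * an unused slot (both conventions are assumptions of this sketch). */
struct bar { uint64_t start, end; };

static bool addr_on_any_bar(uint64_t addr, const struct bar *bars, int nbars)
{
        for (int i = 0; i < nbars; i++) {
                if (bars[i].start == 0)
                        break;
                if (addr >= bars[i].start && addr < bars[i].end)
                        return true;
        }
        return false;
}

int main(void)
{
        struct bar vga_bars[] = { { 0xd0000000ULL, 0xd1000000ULL }, { 0, 0 } };

        /* A firmware-reported base would only be kept if some BAR contains it. */
        printf("inside:  %d\n", addr_on_any_bar(0xd0080000ULL, vga_bars, 2));
        printf("outside: %d\n", addr_on_any_bar(0xe0000000ULL, vga_bars, 2));
        return 0;
}
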
index c91a7f70f7b086f56882ba07f97ba38e314adfd6..a31a77ff6f3d2a3ac2daa56baeb6ae55430f6177 100644 (file)
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
         * Set bit to enable graphics DMA.
         */
        x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
-       x |= fbi->active ? 0x00000100 : 0;
-       fbi->active = 0;
+       x &= ~CFG_GRA_ENA_MASK;
+       x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
 
        /*
         * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
        return ret;
 }
 
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
 {
        struct pxa168fb_mach_info *mi;
        struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
        .probe          = pxa168fb_probe,
 };
 
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
 {
        return platform_driver_register(&pxa168fb_driver);
 }
index 559bf1727a2b8f252a193d6fac276aed56124428..b52f8e4ef1fdbe3cd19c70d4fb282d15f5b99f85 100644 (file)
@@ -1701,6 +1701,9 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                break;
 
           case FBIOGET_VBLANK:
+
+               memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
                sisvbblank.count = 0;
                sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
 
index da03c074e32aad8b909c4b11c57a4c5a9f9d9285..4d553d0b8d7a450b9337a9b4375856c5b304217c 100644 (file)
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
 {
        struct viafb_ioctl_info viainfo;
 
+       memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
        viainfo.viafb_id = VIAID;
        viainfo.vendor_id = PCI_VIA_VENDOR_ID;
 
index b036677df8c445420906c722d00611dedcaf8b0a..24efd8ea41bb04bab6830cfddfd2281881ec9ae7 100644 (file)
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
          here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
 
 config PNX4008_WATCHDOG
-       tristate "PNX4008 Watchdog"
-       depends on ARCH_PNX4008
+       tristate "PNX4008 and LPC32XX Watchdog"
+       depends on ARCH_PNX4008 || ARCH_LPC32XX
        help
          Say Y here if to include support for the watchdog timer
-         in the PNX4008 processor.
+         in the PNX4008 or LPC32XX processor.
          This driver can be built as a module by choosing M. The module
          will be called pnx4008_wdt.
 
index 88c83aa5730318b512d86a7a048dc60a9758de46..f31493e65b380cd63fef6d55132dd96e7af32d63 100644 (file)
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
        if (ret) {
                printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
                                                ident.identity, ret);
-               return ret;
+               goto out;
        }
 
        ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
                printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
                                ident.identity,
                                timeout / 1000000, (timeout / 100000) % 10);
-       } else
-               free_irq(1, (void *)user_dog);
+               return 0;
+       }
+       free_irq(1, (void *)user_dog);
+out:
+       unregister_reboot_notifier(&sbwdog_notifier);
+
        return ret;
 }
 
 static void __exit sbwdog_exit(void)
 {
        misc_deregister(&sbwdog_miscdev);
+       free_irq(1, (void *)user_dog);
+       unregister_reboot_notifier(&sbwdog_notifier);
 }
 
 module_init(sbwdog_init);
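
The sb_wdog change reshapes the init path into the usual acquire-in-order, release-in-reverse unwind, and makes the exit path drop the IRQ and reboot notifier as well. A small userspace sketch of that unwind shape; the resource names are invented for illustration and the "notifier" is only a printout.

#include <stdio.h>
#include <stdlib.h>

static int  grab_irq(void)          { puts("irq requested");          return 0;  }
static void release_irq(void)       { puts("irq freed");                         }
static int  register_misc(void)     { puts("misc register");          return -1; } /* pretend failure */
static void unregister_reboot(void) { puts("reboot notifier dropped");           }

static int wdog_init(void)
{
        int ret;

        ret = grab_irq();
        if (ret)
                goto out;              /* nothing acquired yet */

        ret = register_misc();
        if (ret == 0)
                return 0;              /* fully set up */

        release_irq();                 /* undo only what succeeded */
out:
        unregister_reboot();           /* registered earlier in the real driver's init */
        return ret;
}

int main(void)
{
        return wdog_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}
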
index 458c499c1223c6f73aaab93f9a6bdc7c816de5a2..18cdeb4c4258a67ccdba77fc517596eaa71819b9 100644 (file)
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
        wdt->pdev = pdev;
        mutex_init(&wdt->lock);
 
+       /* make sure that the watchdog is disabled */
+       ts72xx_wdt_stop(wdt);
+
        error = misc_register(&ts72xx_wdt_miscdev);
        if (error) {
                dev_err(&pdev->dev, "failed to register miscdev\n");
index 29bac5118877ef2028a781e6b409e2fe36463a99..d409495876f11b24fabaebc01d57f02224fe459f 100644 (file)
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
 
-       blocking_notifier_chain_register(&xenstore_chain, nb);
+       if (xenstored_ready > 0)
+               ret = nb->notifier_call(nb, 0, NULL);
+       else
+               blocking_notifier_chain_register(&xenstore_chain, nb);
 
        return ret;
 }
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
 void xenbus_probe(struct work_struct *unused)
 {
-       BUG_ON((xenstored_ready <= 0));
+       xenstored_ready = 1;
 
        /* Enumerate devices in xenstore and watch for changes. */
        xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
                        xen_store_evtchn = xen_start_info->store_evtchn;
                        xen_store_mfn = xen_start_info->store_mfn;
                        xen_store_interface = mfn_to_virt(xen_store_mfn);
+                       xenstored_ready = 1;
                }
-               xenstored_ready = 1;
        }
 
        /* Initialize the interface to xenstore. */
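
The xenbus hunks make registration race-free against startup: if xenstore is already up, the callback runs immediately instead of sitting on a chain that will never fire again. A minimal sketch of that "register or fire now" idiom with invented names; it is not the Xen or notifier-chain API, and a single pending slot stands in for the chain.

#include <stdio.h>

typedef int (*notifier_fn)(void *data);

static int ready;                  /* set once the subsystem has started */
static notifier_fn pending;        /* single-slot "chain", enough for a sketch */
static void *pending_data;

static int register_ready_notifier(notifier_fn fn, void *data)
{
        if (ready)
                return fn(data);   /* already up: run the callback right now */
        pending = fn;              /* otherwise remember it for later */
        pending_data = data;
        return 0;
}

static void mark_ready(void)
{
        ready = 1;
        if (pending)
                pending(pending_data);
}

static int on_ready(void *data)
{
        (void)data;
        puts("notifier fired");
        return 0;
}

int main(void)
{
        register_ready_notifier(on_ready, NULL);  /* queued: not ready yet */
        mark_ready();                             /* fires the queued callback */
        register_ready_notifier(on_ready, NULL);  /* fires immediately */
        return 0;
}
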
index 16c8a2a98c1bb6b93fbc8634cb1986cf555bad0e..899f168fd19cc4d4aca48e6d26e51aa08b34db59 100644 (file)
@@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
 
        fid = filp->private_data;
        P9_DPRINTK(P9_DEBUG_VFS,
-                       "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid);
+                       "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
+                       inode, filp, fid ? fid->fid : -1);
        filemap_write_and_wait(inode->i_mapping);
-       p9_client_clunk(fid);
+       if (fid)
+               p9_client_clunk(fid);
        return 0;
 }
 
index c7c23eab94403468d161eac3bd254dfde1e27610..9e670d527646fc4abe2be6f0dfc992f4a3178042 100644 (file)
@@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       dentry->d_op = &v9fs_cached_dentry_operations;
+       if (v9ses->cache)
+               dentry->d_op = &v9fs_cached_dentry_operations;
+       else
+               dentry->d_op = &v9fs_dentry_operations;
        d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
@@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
                generic_fillattr(dentry->d_inode, stat);
 
+       p9stat_free(st);
        kfree(st);
        return 0;
 }
@@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
 
        retval = strnlen(buffer, buflen);
 done:
+       p9stat_free(st);
        kfree(st);
        return retval;
 }
@@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
        .unlink = v9fs_vfs_unlink,
        .mkdir = v9fs_vfs_mkdir,
        .rmdir = v9fs_vfs_rmdir,
-       .mknod = v9fs_vfs_mknod_dotl,
+       .mknod = v9fs_vfs_mknod,
        .rename = v9fs_vfs_rename,
        .getattr = v9fs_vfs_getattr,
        .setattr = v9fs_vfs_setattr,
index f9311077de6842091df9f257e3e6d91c641a0622..1d12ba0ed3db52fa55e2e6ff4aa48ade2cae1a88 100644 (file)
@@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
        fid = v9fs_session_init(v9ses, dev_name, data);
        if (IS_ERR(fid)) {
                retval = PTR_ERR(fid);
+               /*
+                * we need to call session_close to tear down some
+                * of the data structure setup by session_init
+                */
                goto close_session;
        }
 
@@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                retval = -ENOMEM;
                goto release_sb;
        }
-
        sb->s_root = root;
 
        if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
                if (IS_ERR(st)) {
                        retval = PTR_ERR(st);
-                       goto clunk_fid;
+                       goto release_sb;
                }
 
                v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
                st = p9_client_stat(fid);
                if (IS_ERR(st)) {
                        retval = PTR_ERR(st);
-                       goto clunk_fid;
+                       goto release_sb;
                }
 
                root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
 
        v9fs_fid_add(root, fid);
 
-P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+       P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
        simple_set_mnt(mnt, sb);
        return 0;
 
 clunk_fid:
        p9_client_clunk(fid);
-
 close_session:
        v9fs_session_close(v9ses);
        kfree(v9ses);
        return retval;
-
 release_sb:
+       /*
+        * we will do the session_close and root dentry release
+        * in the below call. But we need to clunk fid, because we haven't
+        * attached the fid to dentry so it won't get clunked
+        * automatically.
+        */
+       p9_client_clunk(fid);
        deactivate_locked_super(sb);
        return retval;
 }
index 3006b5bc33d697a2f773b2d81016fda57ee5ff68..250b0a73c8a8ca92b78c3a0282d2425ce7649dcf 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
         */
        ret = retry(iocb);
 
-       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
+       if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+               /*
+                * There's no easy way to restart the syscall since other AIO's
+                * may be already running. Just fail this IO with EINTR.
+                */
+               if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+                            ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
+                       ret = -EINTR;
                aio_complete(iocb, ret, 0);
+       }
 out:
        spin_lock_irq(&ctx->ctx_lock);
 
@@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr,
        if (unlikely(nr < 0))
                return -EINVAL;
 
+       if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
+               nr = LONG_MAX/sizeof(*iocbpp);
+
        if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
                return -EFAULT;
 
index a7528b91393676bb1f1affa72f6b78f38206d4c5..fd0cc0bf9a40396a150ad77b69bf5284531d6125 100644 (file)
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = register_binfmt(&misc_format);
+               err = insert_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index 612a5c38d3c1a5fc49e0d0990e19550c27217650..4d0ff5ee27b86bef6d377b9211694941939999a9 100644 (file)
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Allocate kernel buffer for protection data */
        len = sectors * blk_integrity_tuple_size(bi);
-       buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp);
+       buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
        if (unlikely(buf == NULL)) {
                printk(KERN_ERR "could not allocate integrity buffer\n");
-               return -EIO;
+               return -ENOMEM;
        }
 
        end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
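
Dropping __GFP_NOFAIL means the allocation is now allowed to fail, so the function reports -ENOMEM instead of -EIO and the caller is expected to cope. A trivial userspace sketch of that shape; sizes and names are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate a per-request buffer and let the caller see the failure,
 * instead of demanding an allocation that can never fail. */
static int prep_buffer(size_t sectors, size_t tuple_size, void **out)
{
        void *buf = malloc(sectors * tuple_size);

        if (buf == NULL)
                return -ENOMEM;    /* caller may retry or fail the request */
        *out = buf;
        return 0;
}

int main(void)
{
        void *buf = NULL;
        int ret = prep_buffer(8, 8, &buf);

        printf("ret=%d buf=%p\n", ret, buf);
        free(buf);
        return 0;
}
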
index bc87b9c1d27ea8e253f5a1b9b395a287ede4106e..0fcd2640c23fdda2c7fd99415b730837c591eba8 100644 (file)
@@ -3,6 +3,7 @@ config CEPH_FS
        depends on INET && EXPERIMENTAL
        select LIBCRC32C
        select CRYPTO_AES
+       select CRYPTO
        help
          Choose Y or M here to include support for mounting the
          experimental Ceph distributed file system.  Ceph is an extremely
index 4cfce1ee31faaf4f2f6aab966acd5d6001753940..efbc604001c8bbfa3b96eb107529f0f03256b1a1 100644 (file)
@@ -411,8 +411,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        if (i_size < page_off + len)
                len = i_size - page_off;
 
-       dout("writepage %p page %p index %lu on %llu~%u\n",
-            inode, page, page->index, page_off, len);
+       dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
+            inode, page, page->index, page_off, len, snapc);
 
        writeback_stat = atomic_long_inc_return(&client->writeback_count);
        if (writeback_stat >
@@ -766,7 +766,8 @@ get_more_pages:
                        /* ok */
                        if (locked_pages == 0) {
                                /* prepare async write request */
-                               offset = page->index << PAGE_CACHE_SHIFT;
+                               offset = (unsigned long long)page->index
+                                       << PAGE_CACHE_SHIFT;
                                len = wsize;
                                req = ceph_osdc_new_request(&client->osdc,
                                            &ci->i_layout,
index a2069b6680aed83eb0be0af7c584b4a619ae239a..73c153092f7292f20616108a3f2e61cc0630659e 100644 (file)
@@ -814,7 +814,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
-       if (ci->i_rdcache_ref || ci->i_rdcache_gen)
+       if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
@@ -1195,10 +1195,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * asynchronously back to the MDS once sync writes complete and dirty
  * data is written out.
  *
+ * Unless @again is true, skip cap_snaps that were already sent to
+ * the MDS (i.e., during this session).
+ *
  * Called under i_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
-                       struct ceph_mds_session **psession)
+                       struct ceph_mds_session **psession,
+                       int again)
                __releases(ci->vfs_inode->i_lock)
                __acquires(ci->vfs_inode->i_lock)
 {
@@ -1227,7 +1231,7 @@ retry:
                 * pages to be written out.
                 */
                if (capsnap->dirty_pages || capsnap->writing)
-                       continue;
+                       break;
 
                /*
                 * if cap writeback already occurred, we should have dropped
@@ -1240,6 +1244,13 @@ retry:
                        dout("no auth cap (migrating?), doing nothing\n");
                        goto out;
                }
+
+               /* only flush each capsnap once */
+               if (!again && !list_empty(&capsnap->flushing_item)) {
+                       dout("already flushed %p, skipping\n", capsnap);
+                       continue;
+               }
+
                mds = ci->i_auth_cap->session->s_mds;
                mseq = ci->i_auth_cap->mseq;
 
@@ -1276,8 +1287,8 @@ retry:
                              &session->s_cap_snaps_flushing);
                spin_unlock(&inode->i_lock);
 
-               dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
-                    inode, capsnap, next_follows, capsnap->size);
+               dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
+                    inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
                             capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1314,7 +1325,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
        struct inode *inode = &ci->vfs_inode;
 
        spin_lock(&inode->i_lock);
-       __ceph_flush_snaps(ci, NULL);
+       __ceph_flush_snaps(ci, NULL, 0);
        spin_unlock(&inode->i_lock);
 }
 
@@ -1477,7 +1488,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 
        /* flush snaps first time around only */
        if (!list_empty(&ci->i_cap_snaps))
-               __ceph_flush_snaps(ci, &session);
+               __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
        spin_lock(&inode->i_lock);
@@ -1894,7 +1905,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
                             cap, capsnap);
-                       __ceph_flush_snaps(ci, &session);
+                       __ceph_flush_snaps(ci, &session, 1);
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
index 6e4f43ff23ec587050eab1b0e735e8d519827c85..a1986eb52045b8184d6ab31e46b8a2497295e9ba 100644 (file)
@@ -1021,11 +1021,15 @@ out_touch:
 static void ceph_dentry_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
-       struct inode *parent_inode = dentry->d_parent->d_inode;
-       u64 snapid = ceph_snap(parent_inode);
+       struct inode *parent_inode = NULL;
+       u64 snapid = CEPH_NOSNAP;
 
+       if (!IS_ROOT(dentry)) {
+               parent_inode = dentry->d_parent->d_inode;
+               if (parent_inode)
+                       snapid = ceph_snap(parent_inode);
+       }
        dout("dentry_release %p parent %p\n", dentry, parent_inode);
-
        if (parent_inode && snapid != CEPH_SNAPDIR) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);
 
index e7cca414da03bcbd7549889a5ecb00d05ee11901..62377ec37edf05c14cf8567c0d22e19e02762253 100644 (file)
@@ -845,7 +845,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
  * the caller) if we fail.
  */
 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
-                                   bool *prehash)
+                                   bool *prehash, bool set_offset)
 {
        struct dentry *realdn;
 
@@ -877,7 +877,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
-       ceph_set_dentry_offset(dn);
+       if (set_offset)
+               ceph_set_dentry_offset(dn);
 out:
        return dn;
 }
@@ -1062,7 +1063,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                                d_delete(dn);
                                goto done;
                        }
-                       dn = splice_dentry(dn, in, &have_lease);
+                       dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
@@ -1105,7 +1106,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
-               dn = splice_dentry(dn, in, NULL);
+               dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
@@ -1237,7 +1238,7 @@ retry_lookup:
                                err = PTR_ERR(in);
                                goto out;
                        }
-                       dn = splice_dentry(dn, in, NULL);
+                       dn = splice_dentry(dn, in, NULL, false);
                        if (IS_ERR(dn))
                                dn = NULL;
                }
index f091b1351786368de18757d8cb262a19d1006bf1..fad95f8f2608bc4e86c0876bedd81cb5be46a4ef 100644 (file)
@@ -2374,6 +2374,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                                                num_fcntl_locks,
                                                num_flock_locks);
                unlock_kernel();
+       } else {
+               err = ceph_pagelist_append(pagelist, &rec, reclen);
        }
 
 out_free:
index b6859f47d364ba2134121d48fa6cc720f3b6b1a4..46a368b6dce5378f3b7a486b60700c52f65a809b 100644 (file)
@@ -5,10 +5,18 @@
 
 #include "pagelist.h"
 
+static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
+{
+       struct page *page = list_entry(pl->head.prev, struct page,
+                                      lru);
+       kunmap(page);
+}
+
 int ceph_pagelist_release(struct ceph_pagelist *pl)
 {
        if (pl->mapped_tail)
-               kunmap(pl->mapped_tail);
+               ceph_pagelist_unmap_tail(pl);
+
        while (!list_empty(&pl->head)) {
                struct page *page = list_first_entry(&pl->head, struct page,
                                                     lru);
@@ -26,7 +34,7 @@ static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
        pl->room += PAGE_SIZE;
        list_add_tail(&page->lru, &pl->head);
        if (pl->mapped_tail)
-               kunmap(pl->mapped_tail);
+               ceph_pagelist_unmap_tail(pl);
        pl->mapped_tail = kmap(page);
        return 0;
 }
index 4868b9dcac5a6cc7a4d00610780572f335ef68f2..190b6c4a6f2b91aace658f8de7664f75cc0bdcc4 100644 (file)
@@ -119,6 +119,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
        INIT_LIST_HEAD(&realm->children);
        INIT_LIST_HEAD(&realm->child_item);
        INIT_LIST_HEAD(&realm->empty_item);
+       INIT_LIST_HEAD(&realm->dirty_item);
        INIT_LIST_HEAD(&realm->inodes_with_caps);
        spin_lock_init(&realm->inodes_with_caps_lock);
        __insert_snap_realm(&mdsc->snap_realms, realm);
@@ -467,7 +468,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                INIT_LIST_HEAD(&capsnap->ci_item);
                INIT_LIST_HEAD(&capsnap->flushing_item);
 
-               capsnap->follows = snapc->seq - 1;
+               capsnap->follows = snapc->seq;
                capsnap->issued = __ceph_caps_issued(ci, NULL);
                capsnap->dirty = dirty;
 
@@ -604,6 +605,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
        struct ceph_snap_realm *realm;
        int invalidate = 0;
        int err = -ENOMEM;
+       LIST_HEAD(dirty_realms);
 
        dout("update_snap_trace deletion=%d\n", deletion);
 more:
@@ -626,24 +628,6 @@ more:
                }
        }
 
-       if (le64_to_cpu(ri->seq) > realm->seq) {
-               dout("update_snap_trace updating %llx %p %lld -> %lld\n",
-                    realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
-               /*
-                * if the realm seq has changed, queue a cap_snap for every
-                * inode with open caps.  we do this _before_ we update
-                * the realm info so that we prepare for writeback under the
-                * _previous_ snap context.
-                *
-                * ...unless it's a snap deletion!
-                */
-               if (!deletion)
-                       queue_realm_cap_snaps(realm);
-       } else {
-               dout("update_snap_trace %llx %p seq %lld unchanged\n",
-                    realm->ino, realm, realm->seq);
-       }
-
        /* ensure the parent is correct */
        err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
        if (err < 0)
@@ -651,6 +635,8 @@ more:
        invalidate += err;
 
        if (le64_to_cpu(ri->seq) > realm->seq) {
+               dout("update_snap_trace updating %llx %p %lld -> %lld\n",
+                    realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
                /* update realm parameters, snap lists */
                realm->seq = le64_to_cpu(ri->seq);
                realm->created = le64_to_cpu(ri->created);
@@ -668,9 +654,17 @@ more:
                if (err < 0)
                        goto fail;
 
+               /* queue realm for cap_snap creation */
+               list_add(&realm->dirty_item, &dirty_realms);
+
                invalidate = 1;
        } else if (!realm->cached_context) {
+               dout("update_snap_trace %llx %p seq %lld new\n",
+                    realm->ino, realm, realm->seq);
                invalidate = 1;
+       } else {
+               dout("update_snap_trace %llx %p seq %lld unchanged\n",
+                    realm->ino, realm, realm->seq);
        }
 
        dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -683,6 +677,14 @@ more:
        if (invalidate)
                rebuild_snap_realms(realm);
 
+       /*
+        * queue cap snaps _after_ we've built the new snap contexts,
+        * so that i_head_snapc can be set appropriately.
+        */
+       list_for_each_entry(realm, &dirty_realms, dirty_item) {
+               queue_realm_cap_snaps(realm);
+       }
+
        __cleanup_empty_realms(mdsc);
        return 0;
 
@@ -715,7 +717,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                igrab(inode);
                spin_unlock(&mdsc->snap_flush_lock);
                spin_lock(&inode->i_lock);
-               __ceph_flush_snaps(ci, &session);
+               __ceph_flush_snaps(ci, &session, 0);
                spin_unlock(&inode->i_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
@@ -816,6 +818,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        };
                        struct inode *inode = ceph_find_inode(sb, vino);
                        struct ceph_inode_info *ci;
+                       struct ceph_snap_realm *oldrealm;
 
                        if (!inode)
                                continue;
@@ -841,18 +844,19 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        dout(" will move %p to split realm %llx %p\n",
                             inode, realm->ino, realm);
                        /*
-                        * Remove the inode from the realm's inode
-                        * list, but don't add it to the new realm
-                        * yet.  We don't want the cap_snap to be
-                        * queued (again) by ceph_update_snap_trace()
-                        * below.  Queue it _now_, under the old context.
+                        * Move the inode to the new realm
                         */
                        spin_lock(&realm->inodes_with_caps_lock);
                        list_del_init(&ci->i_snap_realm_item);
+                       list_add(&ci->i_snap_realm_item,
+                                &realm->inodes_with_caps);
+                       oldrealm = ci->i_snap_realm;
+                       ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
                        spin_unlock(&inode->i_lock);
 
-                       ceph_queue_cap_snap(ci);
+                       ceph_get_snap_realm(mdsc, realm);
+                       ceph_put_snap_realm(mdsc, oldrealm);
 
                        iput(inode);
                        continue;
@@ -880,43 +884,9 @@ skip_inode:
        ceph_update_snap_trace(mdsc, p, e,
                               op == CEPH_SNAP_OP_DESTROY);
 
-       if (op == CEPH_SNAP_OP_SPLIT) {
-               /*
-                * ok, _now_ add the inodes into the new realm.
-                */
-               for (i = 0; i < num_split_inos; i++) {
-                       struct ceph_vino vino = {
-                               .ino = le64_to_cpu(split_inos[i]),
-                               .snap = CEPH_NOSNAP,
-                       };
-                       struct inode *inode = ceph_find_inode(sb, vino);
-                       struct ceph_inode_info *ci;
-
-                       if (!inode)
-                               continue;
-                       ci = ceph_inode(inode);
-                       spin_lock(&inode->i_lock);
-                       if (list_empty(&ci->i_snap_realm_item)) {
-                               struct ceph_snap_realm *oldrealm =
-                                       ci->i_snap_realm;
-
-                               dout(" moving %p to split realm %llx %p\n",
-                                    inode, realm->ino, realm);
-                               spin_lock(&realm->inodes_with_caps_lock);
-                               list_add(&ci->i_snap_realm_item,
-                                        &realm->inodes_with_caps);
-                               ci->i_snap_realm = realm;
-                               spin_unlock(&realm->inodes_with_caps_lock);
-                               ceph_get_snap_realm(mdsc, realm);
-                               ceph_put_snap_realm(mdsc, oldrealm);
-                       }
-                       spin_unlock(&inode->i_lock);
-                       iput(inode);
-               }
-
+       if (op == CEPH_SNAP_OP_SPLIT)
                /* we took a reference when we created the realm, above */
                ceph_put_snap_realm(mdsc, realm);
-       }
 
        __cleanup_empty_realms(mdsc);
 
index c33897ae5725e82ca269606b78d54214d8abf7af..b87638e84c4bc266e9b325f12e7fb98fc78e8986 100644 (file)
@@ -690,6 +690,8 @@ struct ceph_snap_realm {
 
        struct list_head empty_item;     /* if i have ref==0 */
 
+       struct list_head dirty_item;     /* if realm needs new context */
+
        /* the current set of snaps for this realm */
        struct ceph_snap_context *cached_context;
 
@@ -826,7 +828,8 @@ extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
 extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                       struct ceph_snap_context *snapc);
 extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
-                              struct ceph_mds_session **psession);
+                              struct ceph_mds_session **psession,
+                              int again);
 extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
 extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
index f80a4f25123c3fa912daa1eb4080e69dab3d58fb..143d393881cbd86865063649dd9b27773a5dcf57 100644 (file)
@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = {
 #endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
-               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
+               /* no writeback happens */
+               BDI_CAP_NO_ACCT_AND_WRITEBACK),
 };
 
 static struct kobj_map *cdev_map;
index 0da1debd499d1845420753f50ef7b7da6765c237..917b7d449bb2a6248c28c23284cf82d9c032f285 100644 (file)
@@ -2,8 +2,6 @@ config CIFS
        tristate "CIFS support (advanced network filesystem, SMBFS successor)"
        depends on INET
        select NLS
-       select CRYPTO_MD5
-       select CRYPTO_ARC4
        help
          This is the client VFS module for the Common Internet File System
          (CIFS) protocol which is the successor to the Server Message Block
index 21f0fbd86989a6811dd6323668f0836079f7e621..cfd1ce34e0bc7b8c4794c81e1aa937e7f009ca75 100644 (file)
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                                if (compare_oid(oid, oidlen, MSKRB5_OID,
                                                MSKRB5_OID_LEN))
                                        server->sec_mskerberos = true;
-                               if (compare_oid(oid, oidlen, KRB5U2U_OID,
+                               else if (compare_oid(oid, oidlen, KRB5U2U_OID,
                                                     KRB5U2U_OID_LEN))
                                        server->sec_kerberosu2u = true;
-                               if (compare_oid(oid, oidlen, KRB5_OID,
+                               else if (compare_oid(oid, oidlen, KRB5_OID,
                                                     KRB5_OID_LEN))
                                        server->sec_kerberos = true;
-                               if (compare_oid(oid, oidlen, NTLMSSP_OID,
+                               else if (compare_oid(oid, oidlen, NTLMSSP_OID,
                                                     NTLMSSP_OID_LEN))
                                        server->sec_ntlmssp = true;
 
index 709f2296bdb4930b2eee0dcb7dd341f94ca42e9b..35042d8f733865c77b31ee8d10dd097d4f0a16d8 100644 (file)
@@ -27,7 +27,6 @@
 #include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
-#include "ntlmssp.h"
 #include <linux/ctype.h>
 #include <linux/random.h>
 
@@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                       unsigned char *p24);
 
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
-                       struct TCP_Server_Info *server, char *signature)
+                                   const struct mac_key *key, char *signature)
 {
-       int rc;
+       struct  MD5Context context;
 
-       if (cifs_pdu == NULL || server == NULL || signature == NULL)
+       if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
                return -EINVAL;
 
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1,
-                       "cifs_calculate_signature: can't generate signature\n");
-               return -1;
-       }
-
-       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
-       if (rc) {
-               cERROR(1, "cifs_calculate_signature: oould not init md5\n");
-               return rc;
-       }
-
-       if (server->secType == RawNTLMSSP)
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       server->session_key.data.ntlmv2.key,
-                       CIFS_NTLMV2_SESSKEY_SIZE);
-       else
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       (char *)&server->session_key.data,
-                       server->session_key.len);
-
-       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+       cifs_MD5_init(&context);
+       cifs_MD5_update(&context, (char *)&key->data, key->len);
+       cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
 
-       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
-
-       return rc;
+       cifs_MD5_final(signature, &context);
+       return 0;
 }
 
-
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
                  __u32 *pexpected_response_sequence_number)
 {
@@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
+       rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+                                     smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 }
 
 static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
-                       struct TCP_Server_Info *server, char *signature)
+                               const struct mac_key *key, char *signature)
 {
+       struct  MD5Context context;
        int i;
-       int rc;
 
-       if (iov == NULL || server == NULL || signature == NULL)
+       if ((iov == NULL) || (signature == NULL) || (key == NULL))
                return -EINVAL;
 
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1, "cifs_calc_signature2: can't generate signature\n");
-               return -1;
-       }
-
-       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
-       if (rc) {
-               cERROR(1, "cifs_calc_signature2: oould not init md5\n");
-               return rc;
-       }
-
-       if (server->secType == RawNTLMSSP)
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       server->session_key.data.ntlmv2.key,
-                       CIFS_NTLMV2_SESSKEY_SIZE);
-       else
-               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                       (char *)&server->session_key.data,
-                       server->session_key.len);
-
+       cifs_MD5_init(&context);
+       cifs_MD5_update(&context, (char *)&key->data, key->len);
        for (i = 0; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
-                       cERROR(1, "cifs_calc_signature2: null iovec entry");
+                       cERROR(1, "null iovec entry");
                        return -EIO;
                }
                /* The first entry includes a length field (which does not get
@@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
                if (i == 0) {
                        if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
                                break; /* nothing to sign or corrupt header */
-                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                               iov[i].iov_base + 4, iov[i].iov_len - 4);
+                       cifs_MD5_update(&context, iov[0].iov_base+4,
+                                 iov[0].iov_len-4);
                } else
-                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
-                               iov[i].iov_base, iov[i].iov_len);
+                       cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
        }
 
-       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+       cifs_MD5_final(signature, &context);
 
-       return rc;
+       return 0;
 }
 
+
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
                   __u32 *pexpected_response_sequence_number)
 {
@@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
+       rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
+                                     smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 }
 
 int cifs_verify_signature(struct smb_hdr *cifs_pdu,
-                         struct TCP_Server_Info *server,
+                         const struct mac_key *mac_key,
                          __u32 expected_sequence_number)
 {
-       int rc;
+       unsigned int rc;
        char server_response_sig[8];
        char what_we_think_sig_should_be[20];
 
-       if (cifs_pdu == NULL || server == NULL)
+       if ((cifs_pdu == NULL) || (mac_key == NULL))
                return -EINVAL;
 
        if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
-       rc = cifs_calculate_signature(cifs_pdu, server,
+       rc = cifs_calculate_signature(cifs_pdu, mac_key,
                what_we_think_sig_should_be);
 
        if (rc)
@@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_session_key(struct session_key *key, const char *rn,
+int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
                           const char *password)
 {
        char temp_key[16];
@@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
 {
        int rc = 0;
        int len;
-       char nt_hash[CIFS_NTHASH_SIZE];
+       char nt_hash[16];
+       struct HMACMD5Context *pctxt;
        wchar_t *user;
        wchar_t *domain;
-       wchar_t *server;
 
-       if (!ses->server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
+       pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
+
+       if (pctxt == NULL)
+               return -ENOMEM;
 
        /* calculate md4 hash of password */
        E_md4hash(ses->password, nt_hash);
 
-       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
-                               CIFS_NTHASH_SIZE);
-
-       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
-               return rc;
-       }
+       /* convert Domainname to unicode and uppercase */
+       hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
 
        /* convert ses->userName to unicode and uppercase */
        len = strlen(ses->userName);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
-       if (user == NULL) {
-               cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
-               rc = -ENOMEM;
+       if (user == NULL)
                goto calc_exit_2;
-       }
        len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
        UniStrupr(user);
-
-       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                               (char *)user, 2 * len);
+       hmac_md5_update((char *)user, 2*len, pctxt);
 
        /* convert ses->domainName to unicode and uppercase */
        if (ses->domainName) {
                len = strlen(ses->domainName);
 
                domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (domain == NULL) {
-                       cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
-                       rc = -ENOMEM;
+               if (domain == NULL)
                        goto calc_exit_1;
-               }
                len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
                                        nls_cp);
                /* the following line was removed since it didn't work well
@@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
                   Maybe converting the domain name earlier makes sense */
                /* UniStrupr(domain); */
 
-               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       (char *)domain, 2 * len);
+               hmac_md5_update((char *)domain, 2*len, pctxt);
 
                kfree(domain);
-       } else if (ses->serverName) {
-               len = strlen(ses->serverName);
-
-               server = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (server == NULL) {
-                       cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
-                       rc = -ENOMEM;
-                       goto calc_exit_1;
-               }
-               len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
-                                       nls_cp);
-               /* the following line was removed since it didn't work well
-                  with lower cased domain name that passed as an option.
-                  Maybe converting the domain name earlier makes sense */
-               /* UniStrupr(domain); */
-
-               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       (char *)server, 2 * len);
-
-               kfree(server);
        }
-
-       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                                       ses->server->ntlmv2_hash);
-
 calc_exit_1:
        kfree(user);
 calc_exit_2:
        /* BB FIXME what about bytes 24 through 40 of the signing key?
           compare with the NTLM example */
+       hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
+       kfree(pctxt);
        return rc;
 }
 
-static int
-find_domain_name(struct cifsSesInfo *ses)
-{
-       int rc = 0;
-       unsigned int attrsize;
-       unsigned int type;
-       unsigned char *blobptr;
-       struct ntlmssp2_name *attrptr;
-
-       if (ses->server->tiblob) {
-               blobptr = ses->server->tiblob;
-               attrptr = (struct ntlmssp2_name *) blobptr;
-
-               while ((type = attrptr->type) != 0) {
-                       blobptr += 2; /* advance attr type */
-                       attrsize = attrptr->length;
-                       blobptr += 2; /* advance attr size */
-                       if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-                               if (!ses->domainName) {
-                                       ses->domainName =
-                                               kmalloc(attrptr->length + 1,
-                                                               GFP_KERNEL);
-                                       if (!ses->domainName)
-                                                       return -ENOMEM;
-                                       cifs_from_ucs2(ses->domainName,
-                                               (__le16 *)blobptr,
-                                               attrptr->length,
-                                               attrptr->length,
-                                               load_nls_default(), false);
-                               }
-                       }
-                       blobptr += attrsize; /* advance attr  value */
-                       attrptr = (struct ntlmssp2_name *) blobptr;
-               }
-       } else {
-               ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
-               ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
-               if (!ses->server->tiblob) {
-                       ses->server->tilen = 0;
-                       cERROR(1, "Challenge target info allocation failure");
-                       return -ENOMEM;
-               }
-               memset(ses->server->tiblob, 0x0, ses->server->tilen);
-               attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
-               attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
-       }
-
-       return rc;
-}
-
-static int
-CalcNTLMv2_response(const struct TCP_Server_Info *server,
-                        char *v2_session_response)
-{
-       int rc;
-
-       if (!server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
-
-       crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
-               CIFS_HMAC_MD5_HASH_SIZE);
-
-       rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
-               return rc;
-       }
-
-       memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
-               server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
-       crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
-               v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
-               sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
-
-       if (server->tilen)
-               crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
-                                       server->tiblob, server->tilen);
-
-       rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
-                                       v2_session_response);
-
-       return rc;
-}
-
-int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
                      const struct nls_table *nls_cp)
 {
-       int rc = 0;
+       int rc;
        struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
+       struct HMACMD5Context context;
 
        buf->blob_signature = cpu_to_le32(0x00000101);
        buf->reserved = 0;
        buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
        get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
        buf->reserved2 = 0;
-
-       if (!ses->domainName) {
-               rc = find_domain_name(ses);
-               if (rc) {
-                       cERROR(1, "could not get domain/server name rc %d", rc);
-                       return rc;
-               }
-       }
+       buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+       buf->names[0].length = 0;
+       buf->names[1].type = 0;
+       buf->names[1].length = 0;
 
        /* calculate buf->ntlmv2_hash */
        rc = calc_ntlmv2_hash(ses, nls_cp);
-       if (rc) {
-               cERROR(1, "could not get v2 hash rc %d", rc);
-               return rc;
-       }
-       rc = CalcNTLMv2_response(ses->server, resp_buf);
-       if (rc) {
+       if (rc)
                cERROR(1, "could not get v2 hash rc %d", rc);
-               return rc;
-       }
-
-       if (!ses->server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
-               return -1;
-       }
-
-       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
-                       ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+       CalcNTLMv2_response(ses, resp_buf);
 
-       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
-       if (rc) {
-               cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
-               return rc;
-       }
+       /* now calculate the MAC key for NTLMv2 */
+       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+       hmac_md5_update(resp_buf, 16, &context);
+       hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
 
-       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
-                               resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
-
-       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
-               ses->server->session_key.data.ntlmv2.key);
-
-       memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
-                       sizeof(struct ntlmv2_resp));
-       ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
-
-       return rc;
+       memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+              sizeof(struct ntlmv2_resp));
+       ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
 }
 
-int
-calc_seckey(struct TCP_Server_Info *server)
-{
-       int rc;
-       unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
-       struct crypto_blkcipher *tfm_arc4;
-       struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
-
-       get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
-
-       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
-                                               0, CRYPTO_ALG_ASYNC);
-       if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
-               cERROR(1, "could not allocate " "master crypto API arc4\n");
-               return 1;
-       }
-
-       desc.tfm = tfm_arc4;
-
-       crypto_blkcipher_setkey(tfm_arc4,
-               server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
-       sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
-       sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
-
-       if (!rc)
-               memcpy(server->session_key.data.ntlmv2.key,
-                               sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
-
-       crypto_free_blkcipher(tfm_arc4);
-
-       return 0;
-}
-
-void
-cifs_crypto_shash_release(struct TCP_Server_Info *server)
-{
-       if (server->ntlmssp.md5)
-               crypto_free_shash(server->ntlmssp.md5);
-
-       if (server->ntlmssp.hmacmd5)
-               crypto_free_shash(server->ntlmssp.hmacmd5);
-
-       kfree(server->ntlmssp.sdeschmacmd5);
-
-       kfree(server->ntlmssp.sdescmd5);
-}
-
-int
-cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
+void CalcNTLMv2_response(const struct cifsSesInfo *ses,
+                        char *v2_session_response)
 {
-       int rc;
-       unsigned int size;
-
-       server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-       if (!server->ntlmssp.hmacmd5 ||
-                       IS_ERR(server->ntlmssp.hmacmd5)) {
-               cERROR(1, "could not allocate crypto hmacmd5\n");
-               return 1;
-       }
-
-       server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
-       if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
-               cERROR(1, "could not allocate crypto md5\n");
-               rc = 1;
-               goto cifs_crypto_shash_allocate_ret1;
-       }
-
-       size = sizeof(struct shash_desc) +
-                       crypto_shash_descsize(server->ntlmssp.hmacmd5);
-       server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
-       if (!server->ntlmssp.sdeschmacmd5) {
-               cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
-               rc = -ENOMEM;
-               goto cifs_crypto_shash_allocate_ret2;
-       }
-       server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
-       server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
+       struct HMACMD5Context context;
+       /* rest of v2 struct already generated */
+       memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
+       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
 
+       hmac_md5_update(v2_session_response+8,
+                       sizeof(struct ntlmv2_resp) - 8, &context);
 
-       size = sizeof(struct shash_desc) +
-                       crypto_shash_descsize(server->ntlmssp.md5);
-       server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
-       if (!server->ntlmssp.sdescmd5) {
-               cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
-               rc = -ENOMEM;
-               goto cifs_crypto_shash_allocate_ret3;
-       }
-       server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
-       server->ntlmssp.sdescmd5->shash.flags = 0x0;
-
-       return 0;
-
-cifs_crypto_shash_allocate_ret3:
-       kfree(server->ntlmssp.sdeschmacmd5);
-
-cifs_crypto_shash_allocate_ret2:
-       crypto_free_shash(server->ntlmssp.md5);
-
-cifs_crypto_shash_allocate_ret1:
-       crypto_free_shash(server->ntlmssp.hmacmd5);
-
-       return rc;
+       hmac_md5_final(v2_session_response, &context);
+/*     cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
 }
index c9d0cfc086ebcb609b504afdec6431e89ac75db9..0cdfb8c32ac68c34a98f5cdf412250c0eacdec2b 100644 (file)
@@ -25,9 +25,6 @@
 #include <linux/workqueue.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
-#include <crypto/internal/hash.h>
-#include <linux/scatterlist.h>
-
 /*
  * The sizes of various internal tables and strings
  */
@@ -100,7 +97,7 @@ enum protocolEnum {
        /* Netbios frames protocol not supported at this time */
 };
 
-struct session_key {
+struct mac_key {
        unsigned int len;
        union {
                char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -123,21 +120,6 @@ struct cifs_cred {
        struct cifs_ace *aces;
 };
 
-struct sdesc {
-       struct shash_desc shash;
-       char ctx[];
-};
-
-struct ntlmssp_auth {
-       __u32 client_flags;
-       __u32 server_flags;
-       unsigned char ciphertext[CIFS_CPHTXT_SIZE];
-       struct crypto_shash *hmacmd5;
-       struct crypto_shash *md5;
-       struct sdesc *sdeschmacmd5;
-       struct sdesc *sdescmd5;
-};
-
 /*
  *****************************************************************
  * Except the CIFS PDUs themselves all the
@@ -200,14 +182,11 @@ struct TCP_Server_Info {
        /* 16th byte of RFC1001 workstation name is always null */
        char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
        __u32 sequence_number; /* needed for CIFS PDU signature */
-       struct session_key session_key;
+       struct mac_key mac_signing_key;
        char ntlmv2_hash[16];
        unsigned long lstrp; /* when we got last response from this server */
        u16 dialect; /* dialect index that server chose */
        /* extended security flavors that server supports */
-       unsigned int tilen; /* length of the target info blob */
-       unsigned char *tiblob; /* target info blob in challenge response */
-       struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
        bool    sec_kerberos;           /* supports plain Kerberos */
        bool    sec_mskerberos;         /* supports legacy MS Kerberos */
        bool    sec_kerberosu2u;        /* supports U2U Kerberos */
index 320e0fd0ba7b5f988b559e060173dc61c2aa064c..14d036d8db111f2719f9e50576e94024a105adfc 100644 (file)
  * Size of the session key (crypto key encrypted with the password
  */
 #define CIFS_SESS_KEY_SIZE (24)
-#define CIFS_CLIENT_CHALLENGE_SIZE (8)
-#define CIFS_SERVER_CHALLENGE_SIZE (8)
-#define CIFS_HMAC_MD5_HASH_SIZE (16)
-#define CIFS_CPHTXT_SIZE (16)
-#define CIFS_NTLMV2_SESSKEY_SIZE (16)
-#define CIFS_NTHASH_SIZE (16)
 
 /*
  * Maximum user name length
@@ -669,6 +663,7 @@ struct ntlmv2_resp {
        __le64  time;
        __u64  client_chal; /* random */
        __u32  reserved2;
+       struct ntlmssp2_name names[2];
        /* array of name entries could follow ending in minimum 4 byte struct */
 } __attribute__((packed));
 
index 1378d9133844f08a369057608ed9d312bebc732f..1d60c655e3e0b70fc54ab5a48c2b6f3ffd01477c 100644 (file)
@@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
 extern int decode_negTokenInit(unsigned char *security_blob, int length,
                        struct TCP_Server_Info *server);
 extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
 extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
-                               unsigned short int port);
+                               const unsigned short int port);
 extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
                            const struct cifsTconInfo *, int /* length of
@@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
                          __u32 *);
 extern int cifs_verify_signature(struct smb_hdr *,
-                                struct TCP_Server_Info *server,
+                                const struct mac_key *mac_key,
                                __u32 expected_sequence_number);
-extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
+extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
                                 const char *pass);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
                             const struct nls_table *);
-extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
-extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct TCP_Server_Info *);
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern void calc_lanman_hash(const char *password, const char *cryptkey,
                                bool encrypt, char *lnm_session_key);
index 4bda920d1f754548ea705b94b13be9a1db321699..7e83b356cc9e3a93c2bc1b0e915d118884170474 100644 (file)
@@ -232,7 +232,7 @@ static int
 small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
                void **request_buf)
 {
-       int rc = 0;
+       int rc;
 
        rc = cifs_reconnect_tcon(tcon, smb_command);
        if (rc)
@@ -250,7 +250,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
 }
 
 int
@@ -281,16 +281,9 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
-        void **request_buf /* returned */ ,
-        void **response_buf /* returned */ )
+__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
 {
-       int rc = 0;
-
-       rc = cifs_reconnect_tcon(tcon, smb_command);
-       if (rc)
-               return rc;
-
        *request_buf = cifs_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
@@ -309,7 +302,31 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
        if (tcon != NULL)
                cifs_stats_inc(&tcon->num_smbs_sent);
 
-       return rc;
+       return 0;
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+        void **request_buf, void **response_buf)
+{
+       int rc;
+
+       rc = cifs_reconnect_tcon(tcon, smb_command);
+       if (rc)
+               return rc;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+}
+
+static int
+smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+                       void **request_buf, void **response_buf)
+{
+       if (tcon->ses->need_reconnect || tcon->need_reconnect)
+               return -EHOSTDOWN;
+
+       return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
 }
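/*
 * [Editor's sketch, not part of the commit] The hunk above splits the old
 * smb_init() into a __smb_init() core plus two thin wrappers, so that
 * smb_init_no_reconnect() can reuse the buffer setup while refusing to kick
 * off a reconnect.  A minimal userspace analogue of that shape, with invented
 * names and a plain -1 standing in for -EHOSTDOWN, could look like this:
 */
#include <stdio.h>
#include <stdlib.h>

static int need_reconnect = 1;          /* stand-in for tcon->need_reconnect */

static int do_reconnect(void)
{
        need_reconnect = 0;             /* pretend the session came back */
        return 0;
}

/* core: only allocates the request buffer, no connection handling */
static int __request_init(void **request_buf)
{
        *request_buf = malloc(64);
        return *request_buf ? 0 : -1;
}

/* normal path: reconnect first, then run the shared core */
static int request_init(void **request_buf)
{
        int rc = need_reconnect ? do_reconnect() : 0;

        if (rc)
                return rc;
        return __request_init(request_buf);
}

/* fast-fail path: never trigger a reconnect, mirroring the -EHOSTDOWN case */
static int request_init_no_reconnect(void **request_buf)
{
        if (need_reconnect)
                return -1;
        return __request_init(request_buf);
}

int main(void)
{
        void *buf = NULL;

        printf("no_reconnect while down: %d\n", request_init_no_reconnect(&buf));
        printf("normal path:             %d\n", request_init(&buf));
        free(buf);
        printf("no_reconnect when up:    %d\n", request_init_no_reconnect(&buf));
        free(buf);
        return 0;
}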
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
@@ -604,14 +621,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        else
                                rc = -EINVAL;
 
-                       if (server->secType == Kerberos) {
-                               if (!server->sec_kerberos &&
-                                               !server->sec_mskerberos)
-                                       rc = -EOPNOTSUPP;
-                       } else if (server->secType == RawNTLMSSP) {
-                               if (!server->sec_ntlmssp)
-                                       rc = -EOPNOTSUPP;
-                       } else
+                       if (server->sec_kerberos || server->sec_mskerberos)
+                               server->secType = Kerberos;
+                       else if (server->sec_ntlmssp)
+                               server->secType = RawNTLMSSP;
+                       else
                                rc = -EOPNOTSUPP;
                }
        } else
@@ -4537,8 +4551,8 @@ CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
 
        cFYI(1, "In QFSUnixInfo");
 QFSUnixRetry:
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                  (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
@@ -4607,8 +4621,8 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
        cFYI(1, "In SETFSUnixInfo");
 SETFSUnixRetry:
        /* BB switch to small buf init to save memory */
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+                                       (void **) &pSMB, (void **) &pSMBr);
        if (rc)
                return rc;
 
index ec0ea4a43bdb4efc0f3734f439b6af78af97a2f4..88c84a38bccb182c313316a172bbae39ba2ee38f 100644 (file)
@@ -400,7 +400,9 @@ incomplete_rcv:
                        cFYI(1, "call to reconnect done");
                        csocket = server->ssocket;
                        continue;
-               } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+               } else if (length == -ERESTARTSYS ||
+                          length == -EAGAIN ||
+                          length == -EINTR) {
                        msleep(1); /* minimum sleep to prevent looping
                                allowing socket to clear and app threads to set
                                tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@ incomplete_rcv:
                        } else
                                continue;
                } else if (length <= 0) {
-                       if (server->tcpStatus == CifsNew) {
-                               cFYI(1, "tcp session abend after SMBnegprot");
-                               /* some servers kill the TCP session rather than
-                                  returning an SMB negprot error, in which
-                                  case reconnecting here is not going to help,
-                                  and so simply return error to mount */
-                               break;
-                       }
-                       if (!try_to_freeze() && (length == -EINTR)) {
-                               cFYI(1, "cifsd thread killed");
-                               break;
-                       }
                        cFYI(1, "Reconnect after unexpected peek error %d",
                                length);
                        cifs_reconnect(server);
@@ -466,27 +456,19 @@ incomplete_rcv:
                           an error on SMB negprot response */
                        cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
                                pdu_length);
-                       if (server->tcpStatus == CifsNew) {
-                               /* if nack on negprot (rather than
-                               ret of smb negprot error) reconnecting
-                               not going to help, ret error to mount */
-                               break;
-                       } else {
-                               /* give server a second to
-                               clean up before reconnect attempt */
-                               msleep(1000);
-                               /* always try 445 first on reconnect
-                               since we get NACK on some if we ever
-                               connected to port 139 (the NACK is
-                               since we do not begin with RFC1001
-                               session initialize frame) */
-                               server->addr.sockAddr.sin_port =
-                                       htons(CIFS_PORT);
-                               cifs_reconnect(server);
-                               csocket = server->ssocket;
-                               wake_up(&server->response_q);
-                               continue;
-                       }
+                       /* give server a second to clean up  */
+                       msleep(1000);
+                       /* always try 445 first on reconnect since we get NACK
+                        * on some if we ever connected to port 139 (the NACK
+                        * is since we do not begin with RFC1001 session
+                        * initialize frame)
+                        */
+                       cifs_set_port((struct sockaddr *)
+                                       &server->addr.sockAddr, CIFS_PORT);
+                       cifs_reconnect(server);
+                       csocket = server->ssocket;
+                       wake_up(&server->response_q);
+                       continue;
                } else if (temp != (char) 0) {
                        cERROR(1, "Unknown RFC 1002 frame");
                        cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@ incomplete_rcv:
                     total_read += length) {
                        length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
                                                pdu_length - total_read, 0);
-                       if ((server->tcpStatus == CifsExiting) ||
-                           (length == -EINTR)) {
+                       if (server->tcpStatus == CifsExiting) {
                                /* then will exit */
                                reconnect = 2;
                                break;
@@ -534,8 +515,9 @@ incomplete_rcv:
                                /* Now we will reread sock */
                                reconnect = 1;
                                break;
-                       } else if ((length == -ERESTARTSYS) ||
-                                  (length == -EAGAIN)) {
+                       } else if (length == -ERESTARTSYS ||
+                                  length == -EAGAIN ||
+                                  length == -EINTR) {
                                msleep(1); /* minimum sleep to prevent looping,
                                              allowing socket to clear and app
                                              threads to set tcpStatus
@@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
                CIFSSMBLogoff(xid, ses);
                _FreeXid(xid);
        }
-       cifs_crypto_shash_release(server);
        sesInfoFree(ses);
        cifs_put_tcp_session(server);
 }
@@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        if (ses) {
                cFYI(1, "Existing smb sess found (status=%d)", ses->status);
 
-               /* existing SMB ses has a server reference already */
-               cifs_put_tcp_session(server);
-
                mutex_lock(&ses->session_mutex);
                rc = cifs_negotiate_protocol(xid, ses);
                if (rc) {
@@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                        }
                }
                mutex_unlock(&ses->session_mutex);
+
+               /* existing SMB ses has a server reference already */
+               cifs_put_tcp_session(server);
                FreeXid(xid);
                return ses;
        }
@@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        ses->linux_uid = volume_info->linux_uid;
        ses->overrideSecFlg = volume_info->secFlg;
 
-       rc = cifs_crypto_shash_allocate(server);
-       if (rc) {
-               cERROR(1, "could not setup hash structures rc %d", rc);
-               goto get_ses_fail;
-       }
-       server->tilen = 0;
-       server->tiblob = NULL;
-
        mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(xid, ses);
        if (!rc)
                rc = cifs_setup_session(xid, ses, volume_info->local_nls);
        mutex_unlock(&ses->session_mutex);
-       if (rc) {
-               cifs_crypto_shash_release(ses->server);
+       if (rc)
                goto get_ses_fail;
-       }
 
        /* success, put it on the list */
        write_lock(&cifs_tcp_ses_lock);
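/*
 * [Editor's sketch, not part of the commit] Several hunks above make the
 * demultiplex thread treat -EINTR like -ERESTARTSYS/-EAGAIN: sleep briefly
 * and retry instead of tearing the session down.  A rough userspace analogue
 * of that "retry only on transient errors" loop, using a non-blocking pipe in
 * place of the SMB socket (all names here are invented):
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* the errno values the loop treats as harmless, mirroring the patch */
static int transient(int err)
{
        return err == EINTR || err == EAGAIN || err == EWOULDBLOCK;
}

int main(void)
{
        int fds[2], tries = 0;
        char buf[16];
        ssize_t n;

        if (pipe(fds))
                return 1;
        fcntl(fds[0], F_SETFL, O_NONBLOCK);     /* empty pipe -> EAGAIN, like an idle socket */

        for (;;) {
                n = read(fds[0], buf, sizeof(buf));
                if (n >= 0)
                        break;                  /* got data (or EOF) */
                if (!transient(errno)) {
                        perror("read");         /* real error: the kernel loop reconnects here */
                        return 1;
                }
                if (++tries == 3 && write(fds[1], "hello", 5) != 5)
                        return 1;               /* after a few retries, supply some data */
                usleep(1000);                   /* msleep(1) analogue: avoid busy-looping */
        }
        printf("read %zd bytes after %d transient retries\n", n, tries);
        return 0;
}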
index 86a164f08a74a51399c2152799ec178c93902fa6..53cce8cc2224f4abe4754d2cc320a05a6f36d215 100644 (file)
@@ -801,6 +801,8 @@ retry_iget5_locked:
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
+                       if (S_ISREG(inode->i_mode))
+                               inode->i_data.backing_dev_info = sb->s_bdi;
 #ifdef CONFIG_CIFS_FSCACHE
                        /* initialize per-inode cache cookie pointer */
                        CIFS_I(inode)->fscache = NULL;
@@ -1462,28 +1464,17 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
 {
        char *fromName = NULL;
        char *toName = NULL;
-       struct cifs_sb_info *cifs_sb_source;
-       struct cifs_sb_info *cifs_sb_target;
+       struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;
        FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
        FILE_UNIX_BASIC_INFO *info_buf_target;
        int xid, rc, tmprc;
 
-       cifs_sb_target = CIFS_SB(target_dir->i_sb);
-       cifs_sb_source = CIFS_SB(source_dir->i_sb);
-       tcon = cifs_sb_source->tcon;
+       cifs_sb = CIFS_SB(source_dir->i_sb);
+       tcon = cifs_sb->tcon;
 
        xid = GetXid();
 
-       /*
-        * BB: this might be allowed if same server, but different share.
-        * Consider adding support for this
-        */
-       if (tcon != cifs_sb_target->tcon) {
-               rc = -EXDEV;
-               goto cifs_rename_exit;
-       }
-
        /*
         * we already have the rename sem so we do not need to
         * grab it again here to protect the path integrity
@@ -1519,17 +1510,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
                info_buf_target = info_buf_source + 1;
                tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
                                        info_buf_source,
-                                       cifs_sb_source->local_nls,
-                                       cifs_sb_source->mnt_cifs_flags &
+                                       cifs_sb->local_nls,
+                                       cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
                if (tmprc != 0)
                        goto unlink_target;
 
-               tmprc = CIFSSMBUnixQPathInfo(xid, tcon,
-                                       toName, info_buf_target,
-                                       cifs_sb_target->local_nls,
-                                       /* remap based on source sb */
-                                       cifs_sb_source->mnt_cifs_flags &
+               tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
+                                       info_buf_target,
+                                       cifs_sb->local_nls,
+                                       cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
 
                if (tmprc == 0 && (info_buf_source->UniqueId ==
index f97851119e6c1b965530f57062b3ae97b53f4e00..9aad47a2d62f6d861035e1ce0b8b6876804aed61 100644 (file)
@@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
 }
 
 int
-cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
-                  const unsigned short int port)
+cifs_set_port(struct sockaddr *addr, const unsigned short int port)
 {
-       if (!cifs_convert_address(dst, src, len))
-               return 0;
-
-       switch (dst->sa_family) {
+       switch (addr->sa_family) {
        case AF_INET:
-               ((struct sockaddr_in *)dst)->sin_port = htons(port);
+               ((struct sockaddr_in *)addr)->sin_port = htons(port);
                break;
        case AF_INET6:
-               ((struct sockaddr_in6 *)dst)->sin6_port = htons(port);
+               ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
                break;
        default:
                return 0;
        }
-
        return 1;
 }
 
+int
+cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
+                  const unsigned short int port)
+{
+       if (!cifs_convert_address(dst, src, len))
+               return 0;
+       return cifs_set_port(dst, port);
+}
+
 /*****************************************************************************
 convert a NT status code to a dos class/code
  *****************************************************************************/
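/*
 * [Editor's sketch, not part of the commit] The hunk above factors the
 * port-assignment switch out of cifs_fill_sockaddr() into cifs_set_port(), so
 * the receive loop can retarget an already-parsed address at port 445 on
 * reconnect.  The same helper is easy to exercise in userspace (the function
 * name and the documentation address below are made up for illustration):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* userspace twin of cifs_set_port(): patch the port, keep the address */
static int set_port(struct sockaddr *addr, unsigned short port)
{
        switch (addr->sa_family) {
        case AF_INET:
                ((struct sockaddr_in *)addr)->sin_port = htons(port);
                return 1;
        case AF_INET6:
                ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
                return 1;
        default:
                return 0;       /* unknown family, same convention as the kernel helper */
        }
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in *v4 = (struct sockaddr_in *)&ss;

        memset(&ss, 0, sizeof(ss));
        v4->sin_family = AF_INET;
        inet_pton(AF_INET, "192.0.2.1", &v4->sin_addr);

        if (set_port((struct sockaddr *)&ss, 445))
                printf("retargeted to port %u\n", ntohs(v4->sin_port));
        return 0;
}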
index 1db0f0746a5b4242f927e9203d8749711180918c..49c9a4e7531979c3e65615dd277ad4b0815ed0ae 100644 (file)
 #define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
 #define NTLMSSP_NEGOTIATE_56        0x80000000
 
-/* Define AV Pair Field IDs */
-#define NTLMSSP_AV_EOL                 0
-#define NTLMSSP_AV_NB_COMPUTER_NAME    1
-#define NTLMSSP_AV_NB_DOMAIN_NAME      2
-#define NTLMSSP_AV_DNS_COMPUTER_NAME   3
-#define NTLMSSP_AV_DNS_DOMAIN_NAME     4
-#define NTLMSSP_AV_DNS_TREE_NAME       5
-#define NTLMSSP_AV_FLAGS               6
-#define NTLMSSP_AV_TIMESTAMP           7
-#define NTLMSSP_AV_RESTRICTION         8
-#define NTLMSSP_AV_TARGET_NAME         9
-#define NTLMSSP_AV_CHANNEL_BINDINGS    10
-
 /* Although typedefs are not commonly used for structure definitions */
 /* in the Linux kernel, in this particular case they are useful      */
 /* to more closely match the standards document for NTLMSSP from     */
index 795095f4eac69ba204257a597522e465dafac371..0a57cb7db5dd7554030e599cd111379e343083df 100644 (file)
@@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
 static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                                    struct cifsSesInfo *ses)
 {
-       unsigned int tioffset; /* challeng message target info area */
-       unsigned int tilen; /* challeng message target info area length  */
-
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
 
-       ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
-
-       tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
-       tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
-       ses->server->tilen = tilen;
-       if (tilen) {
-               ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
-               if (!ses->server->tiblob) {
-                       cERROR(1, "Challenge target info allocation failure");
-                       return -ENOMEM;
-               }
-               memcpy(ses->server->tiblob,  bcc_ptr + tioffset, tilen);
-       }
-
        return 0;
 }
 
@@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        /* BB is NTLMV2 session security format easier to use here? */
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NTLM;
+               NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
        if (ses->server->secMode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-               flags |= NTLMSSP_NEGOTIATE_SIGN |
-                       NTLMSSP_NEGOTIATE_KEY_XCH |
-                       NTLMSSP_NEGOTIATE_EXTENDED_SEC;
-       }
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+               flags |= NTLMSSP_NEGOTIATE_SIGN;
+       if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
 
        sec_blob->NegotiateFlags |= cpu_to_le32(flags);
 
@@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                                   struct cifsSesInfo *ses,
                                   const struct nls_table *nls_cp, bool first)
 {
-       int rc;
-       unsigned int size;
        AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
        __u32 flags;
        unsigned char *tmp;
-       struct ntlmv2_resp ntlmv2_response = {};
+       char ntlm_session_key[CIFS_SESS_KEY_SIZE];
 
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
@@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
-       if (rc) {
-               cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
-               goto setup_ntlmv2_ret;
-       }
-       size =  sizeof(struct ntlmv2_resp);
-       memcpy(tmp, (char *)&ntlmv2_response, size);
-       tmp += size;
-       if (ses->server->tilen > 0) {
-               memcpy(tmp, ses->server->tiblob, ses->server->tilen);
-               tmp += ses->server->tilen;
-       } else
-               ses->server->tilen = 0;
+       /* calculate session key,  BB what about adding similar ntlmv2 path? */
+       SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
+       if (first)
+               cifs_calculate_mac_key(&ses->server->mac_signing_key,
+                                      ntlm_session_key, ses->password);
 
-       sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
-                               ses->server->tilen);
+       memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
+       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
        sec_blob->NtChallengeResponse.MaximumLength =
-               cpu_to_le16(size + ses->server->tilen);
+                               cpu_to_le16(CIFS_SESS_KEY_SIZE);
+
+       tmp += CIFS_SESS_KEY_SIZE;
 
        if (ses->domainName == NULL) {
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
+               len += 2; /* trailing null */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
+               len += 2; /* trailing null */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
 
-       if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
-                       !calc_seckey(ses->server)) {
-               memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-               sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.MaximumLength =
-                       cpu_to_le16(CIFS_CPHTXT_SIZE);
-               tmp += CIFS_CPHTXT_SIZE;
-       } else {
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-               sec_blob->SessionKey.Length = 0;
-               sec_blob->SessionKey.MaximumLength = 0;
-       }
-
-       ses->server->sequence_number = 0;
-
-setup_ntlmv2_ret:
-       if (ses->server->tilen > 0)
-               kfree(ses->server->tiblob);
-
+       sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->SessionKey.Length = 0;
+       sec_blob->SessionKey.MaximumLength = 0;
        return tmp - pbuffer;
 }
 
@@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
        return;
 }
 
-static int setup_ntlmssp_auth_req(char *ntlmsspblob,
+static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
                                  struct cifsSesInfo *ses,
                                  const struct nls_table *nls, bool first_time)
 {
        int bloblen;
 
-       bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
+       bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
                                          first_time);
+       pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
 
        return bloblen;
 }
@@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate:
 
                if (first_time) /* should this be moved into common code
                                  with similar ntlmv2 path? */
-                       cifs_calculate_session_key(&ses->server->session_key,
+                       cifs_calculate_mac_key(&ses->server->mac_signing_key,
                                ntlm_session_key, ses->password);
                /* copy session key */
 
@@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate:
                        cpu_to_le16(sizeof(struct ntlmv2_resp));
 
                /* calculate session key */
-               rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
-               if (rc) {
-                       kfree(v2_sess_key);
-                       goto ssetup_exit;
-               }
+               setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
                /* FIXME: calculate MAC key */
                memcpy(bcc_ptr, (char *)v2_sess_key,
                       sizeof(struct ntlmv2_resp));
                bcc_ptr += sizeof(struct ntlmv2_resp);
                kfree(v2_sess_key);
-               if (ses->server->tilen > 0) {
-                       memcpy(bcc_ptr, ses->server->tiblob,
-                               ses->server->tilen);
-                       bcc_ptr += ses->server->tilen;
-               }
                if (ses->capabilities & CAP_UNICODE) {
                        if (iov[0].iov_len % 2) {
                                *bcc_ptr = 0;
@@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate:
                }
                /* bail out if key is too long */
                if (msg->sesskey_len >
-                   sizeof(ses->server->session_key.data.krb5)) {
+                   sizeof(ses->server->mac_signing_key.data.krb5)) {
                        cERROR(1, "Kerberos signing key too long (%u bytes)",
                                msg->sesskey_len);
                        rc = -EOVERFLOW;
                        goto ssetup_exit;
                }
                if (first_time) {
-                       ses->server->session_key.len = msg->sesskey_len;
-                       memcpy(ses->server->session_key.data.krb5,
+                       ses->server->mac_signing_key.len = msg->sesskey_len;
+                       memcpy(ses->server->mac_signing_key.data.krb5,
                                msg->data, msg->sesskey_len);
                }
                pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate:
                        if (phase == NtLmNegotiate) {
                                setup_ntlmssp_neg_req(pSMB, ses);
                                iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
-                               iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        } else if (phase == NtLmAuthenticate) {
                                int blob_len;
-                               char *ntlmsspblob;
-
-                               ntlmsspblob = kmalloc(5 *
-                                       sizeof(struct _AUTHENTICATE_MESSAGE),
-                                       GFP_KERNEL);
-                               if (!ntlmsspblob) {
-                                       cERROR(1, "Can't allocate NTLMSSP");
-                                       rc = -ENOMEM;
-                                       goto ssetup_exit;
-                               }
-
-                               blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
-                                                               ses,
-                                                               nls_cp,
-                                                               first_time);
+                               blob_len = setup_ntlmssp_auth_req(pSMB, ses,
+                                                                 nls_cp,
+                                                                 first_time);
                                iov[1].iov_len = blob_len;
-                               iov[1].iov_base = ntlmsspblob;
-                               pSMB->req.SecurityBlobLength =
-                                       cpu_to_le16(blob_len);
                                /* Make sure that we tell the server that we
                                   are using the uid that it just gave us back
                                   on the response (challenge) */
@@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate:
                                rc = -ENOSYS;
                                goto ssetup_exit;
                        }
+                       iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        /* unicode strings must be word aligned */
                        if ((iov[0].iov_len + iov[1].iov_len) % 2) {
                                *bcc_ptr = 0;
index e0588cdf4cc5d5a1e8a73c1190c21f2d6cbe4986..82f78c4d6978ceafdab5789182193b899a435202 100644 (file)
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(midQ->resp_buf,
-                                               ses->server,
+                                               &ses->server->mac_signing_key,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(out_buf,
-                                               ses->server,
+                                               &ses->server->mac_signing_key,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
            (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                     SECMODE_SIGN_ENABLED))) {
                rc = cifs_verify_signature(out_buf,
-                                          ses->server,
+                                          &ses->server->mac_signing_key,
                                           midQ->sequence_number+1);
                if (rc) {
                        cERROR(1, "Unexpected SMB signature");
index de89645777c7c2b06cb62b3657cd72b26a9c4f9e..116af7546cf0de6ef076e04ad74b4a2e9aa4bade 100644 (file)
@@ -184,8 +184,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
        }
 
        /* adjust outsize. is this useful ?? */
-        req->uc_outSize = nbytes;      
-        req->uc_flags |= REQ_WRITE;
+       req->uc_outSize = nbytes;
+       req->uc_flags |= CODA_REQ_WRITE;
        count = nbytes;
 
        /* Convert filedescriptor into a file handle */
index 718c7062aec129844cda361e2f4b8a143aa31861..0644a154672b93012a4d7a7f864ba7f505b43213 100644 (file)
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
 {
        compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
-       struct iovec *iov;
+       struct iovec *iov = iovstack;
        ssize_t ret;
        io_fn_t fn;
        iov_fn_t fnv;
index 51f270b479b6938a4a730ea56f9011c30f99563f..48d74c7391d13f4f07393c45d19825e937ecbcd1 100644 (file)
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
        int ret = 0;
 
        if (dio->bio) {
-               loff_t cur_offset = dio->block_in_file << dio->blkbits;
+               loff_t cur_offset = dio->cur_page_fs_offset;
                loff_t bio_next_offset = dio->logical_offset_in_bio +
                        dio->bio->bi_size;
 
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
                 * Submit now if the underlying fs is about to perform a
                 * metadata read
                 */
-               if (dio->boundary)
+               else if (dio->boundary)
                        dio_bio_submit(dio);
        }
 
index 2d9455282744bce582e48e0ecec4f4a6d332a28c..828dd2461d6beb7c37ed7df74ef11177c2bcba6d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max)
                        argv++;
                        if (i++ >= max)
                                return -E2BIG;
+
+                       if (fatal_signal_pending(current))
+                               return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
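/*
 * [Editor's sketch, not part of the commit] The hunk above lets count() (and,
 * below, copy_strings()) notice a fatal signal while walking a huge argv and
 * bail out with -ERESTARTNOHAND instead of grinding on.  A loose userspace
 * analogue, where a signal handler sets a flag that the loop polls (all names
 * are invented and the return codes are plain sentinels):
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_fatal_signal;

static void on_term(int sig)
{
        (void)sig;
        got_fatal_signal = 1;           /* stand-in for fatal_signal_pending(current) */
}

/* walk a NULL-terminated argv-like array, giving up promptly if asked to die */
static long count_args(char **argv, long max)
{
        long i = 0;

        while (argv[i]) {
                if (++i > max)
                        return -1;      /* -E2BIG analogue */
                if (got_fatal_signal)
                        return -2;      /* -ERESTARTNOHAND analogue */
                /* cond_resched() has no userspace twin; a sched_yield() could go here */
        }
        return i;
}

int main(void)
{
        char *args[] = { "a", "b", "c", NULL };

        signal(SIGTERM, on_term);
        printf("counted %ld args\n", count_args(args, 1024));
        return 0;
}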
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv,
                while (len > 0) {
                        int offset, bytes_to_copy;
 
+                       if (fatal_signal_pending(current)) {
+                               ret = -ERESTARTNOHAND;
+                               goto out;
+                       }
+                       cond_resched();
+
                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
+
+       if (unlikely(stack_top < mmap_min_addr) ||
+           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+               return -ENOMEM;
+
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
index 6769fd0f35b88373fdb8d0b265668a976a7ab251..f8cc34f542c3a1cc8b4f53309ffae317ecdddf15 100644 (file)
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
 
 static int __init fcntl_init(void)
 {
-       /* please add new bits here to ensure allocation uniqueness */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       /*
+        * Please add new bits here to ensure allocation uniqueness.
+        * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+        * is defined as O_NONBLOCK on some platforms and not on others.
+        */
+       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
-               O_TRUNC         | O_APPEND      | O_NONBLOCK    |
+               O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
index 7d9d06ba184b409e2ae90050ce82ca365cf3ce82..ab38fef1c9a1a52eab7128fa6a7dad217d4ad744 100644 (file)
@@ -52,8 +52,6 @@ struct wb_writeback_work {
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-#define inode_to_bdi(inode)    ((inode)->i_mapping->backing_dev_info)
-
 /*
  * We don't actually have pdflush, but this one is exported though /proc...
  */
@@ -71,6 +69,16 @@ int writeback_in_progress(struct backing_dev_info *bdi)
        return test_bit(BDI_writeback_running, &bdi->state);
 }
 
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       if (strcmp(sb->s_type->name, "bdev") == 0)
+               return inode->i_mapping->backing_dev_info;
+
+       return sb->s_bdi;
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
                struct wb_writeback_work *work)
 {
@@ -808,7 +816,7 @@ int bdi_writeback_thread(void *data)
                        wb->last_active = jiffies;
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (!list_empty(&bdi->work_list)) {
+               if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
index 69ad053ffd78cb0f2669516b5327571f37d65254..cde755cca5642d41fb9cbbe05ac2d01f70f53c69 100644 (file)
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1354,7 +1354,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
-       size_t total_len;
+       size_t total_len = 0;
 
        req = fuse_get_req(fc);
        if (IS_ERR(req))
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
        }
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+       fc->max_background = UINT_MAX;
+       flush_bg_queue(fc);
+       end_requests(fc, &fc->pending);
+       end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               end_queued_requests(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               fc->blocked = 0;
+               end_queued_requests(fc);
+               wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
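/*
 * [Editor's sketch, not part of the commit] The fuse hunks above only touch
 * sparse lock-context annotations, changing __releases(&fc->lock) to
 * __releases(fc->lock) so the expression matches what sparse tracks.  Outside
 * the kernel tree the same idea can be mocked up as below; the macro bodies
 * follow the usual kernel pattern but are reproduced from memory, so treat
 * them as an approximation rather than the kernel's exact definitions.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__                      /* only sparse defines this */
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)                  /* ordinary compilers see nothing */
# define __releases(x)
#endif

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* caller holds 'lock'; this helper drops it, like request_end() in the patch */
static void finish_and_unlock(void)
__releases(lock)
{
        counter++;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_mutex_lock(&lock);
        finish_and_unlock();
        printf("counter = %d\n", counter);
        return 0;
}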
index 147c1f71bdb9f0213307fd3e63f6e3b30fc3f403..c8224587123f6e2ff84c8933f8d56a50ffd06c80 100644 (file)
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
index cde1248a62255ae9bdb03b4c2757a71e973fa089..ac750bd31a6f311e8a3dfc859a257c6f85b2f79d 100644 (file)
@@ -932,7 +932,7 @@ int gfs2_logd(void *data)
 
                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
-                                       TASK_UNINTERRUPTIBLE);
+                                       TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
index e20ee85955d1c77c3a410da2c82893cd38acce8e..f3f3578393a417085812ba1ad7e0b7c1e4e5c981 100644 (file)
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 
        inode_inc_link_count(dir);
 
-       inode = minix_new_inode(dir, mode, &err);
+       inode = minix_new_inode(dir, S_IFDIR | mode, &err);
        if (!inode)
                goto out_dir;
 
index 6c2aad49d7318054b57c22b58c6b4eaa0be50178..f7e13db613cbce9a3014b826c9d00414169f4de8 100644 (file)
@@ -63,6 +63,7 @@ config NFS_V3_ACL
 config NFS_V4
        bool "NFS client support for NFS version 4"
        depends on NFS_FS
+       select SUNRPC_GSS
        help
          This option enables support for version 4 of the NFS protocol
          (RFC 3530) in the kernel's NFS client.
index 4e7df2adb2125724a4ea9ade0fc767606d54c4b3..e7340729af896e2bce1097fe4f8521618680173e 100644 (file)
@@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
            sin1->sin6_scope_id != sin2->sin6_scope_id)
                return 0;
 
-       return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+       return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
 }
 #else  /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
 static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
index eb51bd6201da0cd361d8265b4c6b0e3edee50ec9..05bf3c0dc751d5d489b1c7be2cf594005caf1695 100644 (file)
@@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
                default:
                        BUG();
        }
-       if (res < 0)
-               dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
-                       " - error %d!\n",
-                               __func__, res);
        return res;
 }
 
index ec3966e4706b2f70d8709199f86456392a6e6584..f4cbf0c306c64d6583cbfffaa14cfbc7a467dfaa 100644 (file)
@@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
                goto out_err;
 
        error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+       if (unlikely(error == -ESTALE)) {
+               struct dentry *pd_dentry;
 
+               pd_dentry = dget_parent(dentry);
+               if (pd_dentry != NULL) {
+                       nfs_zap_caches(pd_dentry->d_inode);
+                       dput(pd_dentry);
+               }
+       }
        nfs_free_fattr(res.fattr);
        if (error < 0)
                goto out_err;
index 95932f523aef2b2b7ef4686b8bb4a18383b4a395..4264377552e207f57550b9e72d53776e0689ca81 100644 (file)
@@ -69,6 +69,7 @@ config NFSD_V4
        depends on NFSD && PROC_FS && EXPERIMENTAL
        select NFSD_V3
        select FS_POSIX_ACL
+       select SUNRPC_GSS
        help
          This option enables support in your system's NFS server for
          version 4 of the NFS protocol (RFC 3530).
index 3dfef062396845d2b45cc42a22064ec4402ee05f..cf0d2ffb3c84a149bc904323cd53599620c8c917 100644 (file)
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-       switch (access) {
+       switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
index a76e0aa5cd3fc5188a392639618858a7e7e34092..391915093fe1c494a58ec8feae54cd576fd2464a 100644 (file)
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
        }
 
        inode->i_mode = new_mode;
+       inode->i_ctime = CURRENT_TIME;
        di->i_mode = cpu_to_le16(inode->i_mode);
+       di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+       di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 
        ocfs2_journal_dirty(handle, di_bh);
 
index 215e12ce1d85e2079359838cf287f1c3c670ff31..592fae5007d1245baade87453ce731121aa6efe5 100644 (file)
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
        last_page_bytes = PAGE_ALIGN(end);
        index = start >> PAGE_CACHE_SHIFT;
        do {
-               pages[numpages] = grab_cache_page(mapping, index);
+               pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
index ec6d123395932b69b6a67ba0b5256be02b811c6f..c7ee03c22226253d970cce94beb11f6353b3e1d0 100644 (file)
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
-            "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
+            "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                goto out;
        }
 
-       mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+       mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
index 1361997cf205d132a04ddeb87760aa233c3e707d..cbe2f057cc2826cb6af4ed833319fe27c4bd565f 100644 (file)
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
 int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
                           size_t caller_veclen, u8 target_node, int *status)
 {
-       int ret;
+       int ret = 0;
        struct o2net_msg *msg = NULL;
        size_t veclen, caller_bytes = 0;
        struct kvec *vec = NULL;
index f04ebcfffc4a5e1516c2a7307ffa8032db8e19d7..c49f6de0e7abb6e096ddc56e795957e0194bb8dd 100644 (file)
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                goto out_commit;
        }
 
+       cpos = split_hash;
+       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
+                                      data_ac, meta_ac, new_dx_leaves,
+                                      num_dx_leaves);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
        for (i = 0; i < num_dx_leaves; i++) {
                ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
                                              orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
                        mlog_errno(ret);
                        goto out_commit;
                }
-       }
 
-       cpos = split_hash;
-       ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
-                                      data_ac, meta_ac, new_dx_leaves,
-                                      num_dx_leaves);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_commit;
+               ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+                                             new_dx_leaves[i],
+                                             OCFS2_JOURNAL_ACCESS_WRITE);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out_commit;
+               }
        }
 
        ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
index 4b6ae2c13b47a85c6a31f6f7d2a1072099354935..765298908f1d3905bbabf3c5e597a8bf72ebd4a9 100644 (file)
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res);
 void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
 int __dlm_lockres_unused(struct dlm_lock_resource *res);
index 5efdd37dfe484f2f6207ac2c7c6927fda0cc1ea0..901ca52bf86b293639ea39c8e1f460b66bc1f265 100644 (file)
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        spin_lock(&dlm->track_lock);
        if (oldres)
                track_list = &oldres->tracking;
-       else
+       else {
                track_list = &dlm->tracking_list;
+               if (list_empty(track_list)) {
+                       dl = NULL;
+                       spin_unlock(&dlm->track_lock);
+                       goto bail;
+               }
+       }
 
        list_for_each_entry(res, track_list, tracking) {
                if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
        } else
                dl = NULL;
 
+bail:
        /* passed to seq_show */
        return dl;
 }
index 153abb5abef024d2ca63f6d4a23ee1c126b89f13..11a5c87fd7f7c00de41c61c00fae625e46675973 100644 (file)
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
+               dlm_force_free_mles(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
        dlm_put(dlm);
index ffb4c68dafa495bc739165eb30f718f1eb0865ec..f564b0e5f80d8c89e08eeba4a5e133b24cb67373 100644 (file)
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
        wake_up(&dlm->migration_wq);
 }
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+       int i;
+       struct hlist_head *bucket;
+       struct dlm_master_list_entry *mle;
+       struct hlist_node *tmp, *list;
+
+       /*
+        * We notified all other nodes that we are exiting the domain and
+        * set the dlm state to DLM_CTXT_LEAVING. If any mles are still
+        * around, we force free them and wake any processes that are waiting
+        * on the mles.
+        */
+       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->master_lock);
+
+       BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+       BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+       for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+               bucket = dlm_master_hash(dlm, i);
+               hlist_for_each_safe(list, tmp, bucket) {
+                       mle = hlist_entry(list, struct dlm_master_list_entry,
+                                         master_hash_node);
+                       if (mle->type != DLM_MLE_BLOCK) {
+                               mlog(ML_ERROR, "bad mle: %p\n", mle);
+                               dlm_print_one_mle(mle);
+                       }
+                       atomic_set(&mle->woken, 1);
+                       wake_up(&mle->wq);
+
+                       __dlm_unlink_mle(dlm, mle);
+                       __dlm_mle_detach_hb_events(dlm, mle);
+                       __dlm_put_mle(mle);
+               }
+       }
+       spin_unlock(&dlm->master_lock);
+       spin_unlock(&dlm->spinlock);
+}
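
dlm_force_free_mles() above walks every master-hash bucket with the _safe iterator because each put may free the entry it is standing on. A stripped-down sketch of that drain-and-wake pattern; the entry type and the put callback are made up:

#include <linux/list.h>
#include <linux/wait.h>
#include <asm/atomic.h>

struct waiter_entry {
        struct hlist_node node;
        wait_queue_head_t wq;
        atomic_t woken;
};

static void drain_bucket(struct hlist_head *bucket,
                         void (*put_entry)(struct waiter_entry *))
{
        struct waiter_entry *e;
        struct hlist_node *pos, *tmp;

        hlist_for_each_safe(pos, tmp, bucket) {
                e = hlist_entry(pos, struct waiter_entry, node);

                /* let anyone sleeping on this entry see 'woken' and run */
                atomic_set(&e->woken, 1);
                wake_up(&e->wq);

                hlist_del(pos);
                put_entry(e);   /* drops our ref; stands in for __dlm_put_mle() */
        }
}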
index d1ce48e1b3d6029e5861cf2863cbb0a544511b40..1d596d8c4a4a55dfd185ecaff4d7d9900bf10b24 100644 (file)
@@ -84,6 +84,7 @@ enum {
        OI_LS_PARENT,
        OI_LS_RENAME1,
        OI_LS_RENAME2,
+       OI_LS_REFLINK_TARGET,
 };
 
 int ocfs2_dlm_init(struct ocfs2_super *osb);
index 81296b4e364632dd5936f59d8adeab9832f2d2fd..9a03c151b5ceabc169215d99777abb92e2d2ad36 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
        if (err)
                goto bail;
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+               /*
+                * We still have to flush the drive's caches to get the data
+                * to the platter.
+                */
+               if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+                                          NULL, BLKDEV_IFL_WAIT);
                goto bail;
+       }
 
        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
        if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && has_refcount)) {
+           ((file->f_flags & O_DIRECT) && !direct_io)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
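
The ocfs2_sync_file() hunk above still issues a block device cache flush on fdatasync() even when no metadata commit is needed, because data sitting in the drive's volatile write cache is not yet durable. A hedged sketch of that shape of ->fsync(), reusing the blkdev_issue_flush() signature shown in the hunk; it is illustrative only, not the ocfs2 implementation:

#include <linux/fs.h>
#include <linux/blkdev.h>

static int example_sync_file(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;

        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                /* nothing to commit in the fs, but the disk cache may
                 * still hold the data pages we already wrote back */
                return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
                                          NULL, BLKDEV_IFL_WAIT);

        /* otherwise force the journal out before returning ... */
        return 0;
}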
index 0492464916b19324e73425e29c473956b0b4bd33..eece3e05d9d0124d04b81c940c1289f876e700bb 100644 (file)
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               if (!status)
+               /*
+                * If buffer is in jbd, then its checksum may not have been
+                * computed as yet.
+                */
+               if (!status && !buffer_jbd(bh))
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
index af2b8fe1f13999e26f6e2543a047847bcf517c4e..4c18f4ad93b43cae6e5ddcd5a2781fbc79292484 100644 (file)
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
        /*
         * Another node might have truncated while we were waiting on
         * cluster locks.
+        * We don't check size == 0 before the shift. This is borrowed
+        * from do_generic_file_read.
         */
-       last_index = size >> PAGE_CACHE_SHIFT;
-       if (page->index > last_index) {
+       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(!size || page->index > last_index)) {
                ret = -EINVAL;
                goto out;
        }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = size & ~PAGE_CACHE_MASK;
+               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
                                       &fsdata, di_bh, page);
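
The last-page length fix above matters when i_size is an exact multiple of the page size: the old mask-only formula yields 0 bytes, while the new one yields a full page (size == 0 is handled separately by the !size check in the hunk). A tiny standalone illustration with PAGE_CACHE_SIZE hard-coded to 4096:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL
#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
        unsigned long long sizes[] = { 1, 4095, 4096, 4097, 8192 };

        for (int i = 0; i < 5; i++) {
                unsigned long long size = sizes[i];
                unsigned long long old_len = size & ~PAGE_CACHE_MASK;
                unsigned long long new_len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;

                printf("i_size=%5llu  old len=%4llu  new len=%4llu\n",
                       size, old_len, new_len);
        }
        return 0;
}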
index f171b51a74f78d6e268b5d743a24df4e702f9643..a00dda2e4f16698e5c7d8e0946ba1f09506651c6 100644 (file)
@@ -472,32 +472,23 @@ leave:
        return status;
 }
 
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
-                             struct inode *dir,
-                             struct inode *inode,
-                             dev_t dev,
-                             struct buffer_head **new_fe_bh,
-                             struct buffer_head *parent_fe_bh,
-                             handle_t *handle,
-                             struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+                               struct inode *inode,
+                               dev_t dev,
+                               struct buffer_head **new_fe_bh,
+                               struct buffer_head *parent_fe_bh,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *inode_ac,
+                               u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
 {
        int status = 0;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_extent_list *fel;
-       u64 suballoc_loc, fe_blkno = 0;
-       u16 suballoc_bit;
        u16 feat;
 
        *new_fe_bh = NULL;
 
-       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
-                                      inode_ac, &suballoc_loc,
-                                      &suballoc_bit, &fe_blkno);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        /* populate as many fields early on as possible - many of
         * these are used by the support functions here and in
         * callers. */
@@ -591,6 +582,34 @@ leave:
        return status;
 }
 
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+                             struct inode *dir,
+                             struct inode *inode,
+                             dev_t dev,
+                             struct buffer_head **new_fe_bh,
+                             struct buffer_head *parent_fe_bh,
+                             handle_t *handle,
+                             struct ocfs2_alloc_context *inode_ac)
+{
+       int status = 0;
+       u64 suballoc_loc, fe_blkno = 0;
+       u16 suballoc_bit;
+
+       *new_fe_bh = NULL;
+
+       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+                                      inode_ac, &suballoc_loc,
+                                      &suballoc_bit, &fe_blkno);
+       if (status < 0) {
+               mlog_errno(status);
+               return status;
+       }
+
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+                                   parent_fe_bh, handle, inode_ac,
+                                   fe_blkno, suballoc_loc, suballoc_bit);
+}
+
 static int ocfs2_mkdir(struct inode *dir,
                       struct dentry *dentry,
                       int mode)
@@ -1852,61 +1871,117 @@ bail:
        return status;
 }
 
-static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct inode **ret_orphan_dir,
-                                   u64 blkno,
-                                   char *name,
-                                   struct ocfs2_dir_lookup_result *lookup)
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+                                       struct inode **ret_orphan_dir,
+                                       struct buffer_head **ret_orphan_dir_bh)
 {
        struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
-       int status = 0;
-
-       status = ocfs2_blkno_stringify(blkno, name);
-       if (status < 0) {
-               mlog_errno(status);
-               return status;
-       }
+       int ret = 0;
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       osb->slot_num);
        if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               return status;
+               ret = -ENOENT;
+               mlog_errno(ret);
+               return ret;
        }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
 
-       status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
+       ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+       if (ret < 0) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
+               iput(orphan_dir_inode);
+
+               mlog_errno(ret);
+               return ret;
        }
 
-       status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
-                                             orphan_dir_bh, name,
-                                             OCFS2_ORPHAN_NAMELEN, lookup);
-       if (status < 0) {
-               ocfs2_inode_unlock(orphan_dir_inode, 1);
+       *ret_orphan_dir = orphan_dir_inode;
+       *ret_orphan_dir_bh = orphan_dir_bh;
 
-               mlog_errno(status);
-               goto leave;
+       return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+                                     struct buffer_head *orphan_dir_bh,
+                                     u64 blkno,
+                                     char *name,
+                                     struct ocfs2_dir_lookup_result *lookup)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+       ret = ocfs2_blkno_stringify(blkno, name);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+                                          orphan_dir_bh, name,
+                                          OCFS2_ORPHAN_NAMELEN, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ *          ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+                                   struct inode **ret_orphan_dir,
+                                   u64 blkno,
+                                   char *name,
+                                   struct ocfs2_dir_lookup_result *lookup)
+{
+       struct inode *orphan_dir_inode = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       int ret = 0;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+                                          &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+                                        blkno, name, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
        }
 
        *ret_orphan_dir = orphan_dir_inode;
 
-leave:
-       if (status) {
+out:
+       brelse(orphan_dir_bh);
+
+       if (ret) {
+               ocfs2_inode_unlock(orphan_dir_inode, 1);
                mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
        }
 
-       brelse(orphan_dir_bh);
-
-       mlog_exit(status);
-       return status;
+       mlog_exit(ret);
+       return ret;
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
        return status;
 }
 
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously, if the inode does not yet exist, we have a chicken-and-egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only by necessity.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+                                       struct buffer_head *dir_bh,
+                                       char *orphan_name,
+                                       struct inode **ret_orphan_dir,
+                                       u64 *ret_di_blkno,
+                                       struct ocfs2_dir_lookup_result *orphan_insert,
+                                       struct ocfs2_alloc_context **ret_inode_ac)
+{
+       int ret;
+       u64 di_blkno;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+       struct inode *orphan_dir = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       struct ocfs2_alloc_context *inode_ac = NULL;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /* reserve an inode spot */
+       ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+       if (ret < 0) {
+               if (ret != -ENOSPC)
+                       mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+                                      &di_blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+                                        di_blkno, orphan_name, orphan_insert);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+out:
+       if (ret == 0) {
+               *ret_orphan_dir = orphan_dir;
+               *ret_di_blkno = di_blkno;
+               *ret_inode_ac = inode_ac;
+               /*
+                * orphan_name and orphan_insert are already up to
+                * date via prepare_orphan_dir
+                */
+       } else {
+               /* Unroll reserve_new_inode* */
+               if (inode_ac)
+                       ocfs2_free_alloc_context(inode_ac);
+
+               /* Unroll orphan dir locking */
+               mutex_unlock(&orphan_dir->i_mutex);
+               ocfs2_inode_unlock(orphan_dir, 1);
+               iput(orphan_dir);
+       }
+
+       brelse(orphan_dir_bh);
+
+       return ret;
+}
+
 int ocfs2_create_inode_in_orphan(struct inode *dir,
                                 int mode,
                                 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct buffer_head *new_di_bh = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+       u64 uninitialized_var(di_blkno), suballoc_loc;
+       u16 suballoc_bit;
 
        status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
        if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                return status;
        }
 
-       /*
-        * We give the orphan dir the root blkno to fake an orphan name,
-        * and allocate enough space for our insertion.
-        */
-       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
-                                         osb->root_blkno,
-                                         orphan_name, &orphan_insert);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
-       /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, &inode_ac);
+       status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+                                             orphan_name, &orphan_dir,
+                                             &di_blkno, &orphan_insert, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        did_quota_inode = 1;
 
-       inode->i_nlink = 0;
-       /* do the real work now. */
-       status = ocfs2_mknod_locked(osb, dir, inode,
-                                   0, &new_di_bh, parent_di_bh, handle,
-                                   inode_ac);
+       status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+                                             &suballoc_loc,
+                                             &suballoc_bit, di_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
 
-       status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+       inode->i_nlink = 0;
+       /* do the real work now. */
+       status = __ocfs2_mknod_locked(dir, inode,
+                                     0, &new_di_bh, parent_di_bh, handle,
+                                     inode_ac, di_blkno, suballoc_loc,
+                                     suballoc_bit);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
index 33f1c9a8258d1d4de5a17fe1ebf1d0d39c4098c3..fa31d05e41b7e558fa24df43682dbd107b568c76 100644 (file)
 #define OCFS2_HAS_REFCOUNT_FL   (0x0010)
 
 /* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL         (0x00000001)    /* Secure deletion */
-#define OCFS2_UNRM_FL          (0x00000002)    /* Undelete */
-#define OCFS2_COMPR_FL         (0x00000004)    /* Compress file */
-#define OCFS2_SYNC_FL          (0x00000008)    /* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL     (0x00000010)    /* Immutable file */
-#define OCFS2_APPEND_FL                (0x00000020)    /* writes to file may only append */
-#define OCFS2_NODUMP_FL                (0x00000040)    /* do not dump file */
-#define OCFS2_NOATIME_FL       (0x00000080)    /* do not update atime */
-#define OCFS2_DIRSYNC_FL       (0x00010000)    /* dirsync behaviour (directories only) */
-
-#define OCFS2_FL_VISIBLE       (0x000100FF)    /* User visible flags */
-#define OCFS2_FL_MODIFIABLE    (0x000100FF)    /* User modifiable flags */
+#define OCFS2_SECRM_FL                 FS_SECRM_FL     /* Secure deletion */
+#define OCFS2_UNRM_FL                  FS_UNRM_FL      /* Undelete */
+#define OCFS2_COMPR_FL                 FS_COMPR_FL     /* Compress file */
+#define OCFS2_SYNC_FL                  FS_SYNC_FL      /* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL             FS_IMMUTABLE_FL /* Immutable file */
+#define OCFS2_APPEND_FL                        FS_APPEND_FL    /* writes to file may only append */
+#define OCFS2_NODUMP_FL                        FS_NODUMP_FL    /* do not dump file */
+#define OCFS2_NOATIME_FL               FS_NOATIME_FL   /* do not update atime */
+/* Reserved for compression usage... */
+#define OCFS2_DIRTY_FL                 FS_DIRTY_FL
+#define OCFS2_COMPRBLK_FL              FS_COMPRBLK_FL  /* One or more compressed clusters */
+#define OCFS2_NOCOMP_FL                        FS_NOCOMP_FL    /* Don't compress */
+#define OCFS2_ECOMPR_FL                        FS_ECOMPR_FL    /* Compression error */
+/* End compression flags --- maybe not all used */
+#define OCFS2_BTREE_FL                 FS_BTREE_FL     /* btree format dir */
+#define OCFS2_INDEX_FL                 FS_INDEX_FL     /* hash-indexed directory */
+#define OCFS2_IMAGIC_FL                        FS_IMAGIC_FL    /* AFS directory */
+#define OCFS2_JOURNAL_DATA_FL          FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define OCFS2_NOTAIL_FL                        FS_NOTAIL_FL    /* file tail should not be merged */
+#define OCFS2_DIRSYNC_FL               FS_DIRSYNC_FL   /* dirsync behaviour (directories only) */
+#define OCFS2_TOPDIR_FL                        FS_TOPDIR_FL    /* Top of directory hierarchies*/
+#define OCFS2_RESERVED_FL              FS_RESERVED_FL  /* reserved for ext2 lib */
+
+#define OCFS2_FL_VISIBLE               FS_FL_USER_VISIBLE      /* User visible flags */
+#define OCFS2_FL_MODIFIABLE            FS_FL_USER_MODIFIABLE   /* User modifiable flags */
 
 /*
  * Extent record flags (e_node.leaf.flags)
index 2d3420af1a839e0e13626a34c289cd2227bf095a..5d241505690b71e2ec0b259cbc39f1ed46d683d5 100644 (file)
 /*
  * ioctl commands
  */
-#define OCFS2_IOC_GETFLAGS     _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS     _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS   _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS   _IOW('f', 2, int)
+#define OCFS2_IOC_GETFLAGS     FS_IOC_GETFLAGS
+#define OCFS2_IOC_SETFLAGS     FS_IOC_SETFLAGS
+#define OCFS2_IOC32_GETFLAGS   FS_IOC32_GETFLAGS
+#define OCFS2_IOC32_SETFLAGS   FS_IOC32_SETFLAGS
 
 /*
  * Space reservation / allocation / free ioctls and argument structure
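
With OCFS2_IOC_GETFLAGS/SETFLAGS now aliased to the generic FS_IOC_* ioctls, and the flag values taken from the shared FS_*_FL set in the previous hunk, a generic userspace tool can query an ocfs2 inode the same way it queries ext2/3/4. A small sketch; the mount path is made up:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        long flags = 0;
        int fd = open("/mnt/ocfs2/somefile", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
                printf("inode flags: 0x%lx%s\n", (unsigned long)flags,
                       (flags & FS_IMMUTABLE_FL) ? " (immutable)" : "");
        close(fd);
        return 0;
}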
index 73a11ccfd4c280681abe672c5e9cd81e3b229a93..efdd7560740655beba6f53dccef235b5ce89cdbf 100644 (file)
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_CACHE_SIZE - 1))
                        to = map_end & (PAGE_CACHE_SIZE - 1);
 
-               page = grab_cache_page(mapping, page_index);
+               page = find_or_create_page(mapping, page_index, GFP_NOFS);
 
                /*
                 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = grab_cache_page(context->inode->i_mapping, page_index);
+               page = find_or_create_page(context->inode->i_mapping,
+                                          page_index, GFP_NOFS);
                BUG_ON(!page);
 
                wait_on_page_writeback(page);
@@ -4200,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
                goto out;
        }
 
-       mutex_lock(&new_inode->i_mutex);
-       ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+       mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
+       ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
+                                     OI_LS_REFLINK_TARGET);
        if (ret) {
                mlog_errno(ret);
                goto out_unlock;
index d8b6e4259b80022cb824326f4ce2c665089b5818..3e78db361bc70b3ffc327a6f21b6d4580305d0eb 100644 (file)
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
                           struct ocfs2_alloc_reservation *resv,
                           int *cstart, int *clen)
 {
-       unsigned int wanted = *clen;
-
        if (resv == NULL || ocfs2_resmap_disabled(resmap))
                return -ENOSPC;
 
        spin_lock(&resv_lock);
 
-       /*
-        * We don't want to over-allocate for temporary
-        * windows. Otherwise, we run the risk of fragmenting the
-        * allocation space.
-        */
-       wanted = ocfs2_resv_window_bits(resmap, resv);
-       if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
-               wanted = *clen;
-
        if (ocfs2_resv_empty(resv)) {
-               mlog(0, "empty reservation, find new window\n");
+               /*
+                * We don't want to over-allocate for temporary
+                * windows. Otherwise, we run the risk of fragmenting the
+                * allocation space.
+                */
+               unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
 
+               if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+                       wanted = *clen;
+
+               mlog(0, "empty reservation, find new window\n");
                /*
                 * Try to get a window here. If it works, we must fall
                 * through and test the bitmap. This avoids some
index a8e6a95a353f03dcb8a34cf928ded84ff7e6d127..849c2f0e0a0e664e983adf61f620abedf87fcb72 100644 (file)
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
        u64             sr_bg_blkno;    /* The bg we allocated from.  Set
                                           to 0 when a block group is
                                           contiguous. */
+       u64             sr_bg_stable_blkno; /*
+                                            * Doesn't change, always
+                                            * set to target block
+                                            * group descriptor
+                                            * block.
+                                            */
        u64             sr_blkno;       /* The first allocated block */
        unsigned int    sr_bit_offset;  /* The bit in the bg */
        unsigned int    sr_bits;        /* How many bits we claimed */
 };
 
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+       if (res->sr_blkno == 0)
+               return 0;
+
+       if (res->sr_bg_blkno)
+               return res->sr_bg_blkno;
+
+       return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
        brelse(ac->ac_bh);
        ac->ac_bh = NULL;
        ac->ac_resv = NULL;
+       if (ac->ac_find_loc_priv) {
+               kfree(ac->ac_find_loc_priv);
+               ac->ac_find_loc_priv = NULL;
+       }
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@ out:
 static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
                                          struct ocfs2_group_desc *bg,
                                          struct ocfs2_chain_list *cl,
-                                         u64 p_blkno, u32 clusters)
+                                         u64 p_blkno, unsigned int clusters)
 {
        struct ocfs2_extent_list *el = &bg->bg_list;
        struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
        rec->e_blkno = cpu_to_le64(p_blkno);
        rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
                                  le16_to_cpu(cl->cl_bpc));
-       rec->e_leaf_clusters = cpu_to_le32(clusters);
+       rec->e_leaf_clusters = cpu_to_le16(clusters);
        le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
        le16_add_cpu(&bg->bg_free_bits_count,
                     clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (!ret)
                ocfs2_bg_discontig_fix_result(ac, gd, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
        ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
                                               res->sr_bits,
                                               le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (ret < 0)
                mlog_errno(ret);
 
+out_loc_only:
        *bits_left = le16_to_cpu(gd->bg_free_bits_count);
 
 out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 {
        int status;
        u16 chain;
-       u32 tmp_used;
        u64 next_group;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        if (!status)
                ocfs2_bg_discontig_fix_result(ac, bg, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
 
        /*
         * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                }
        }
 
-       /* Ok, claim our bits now: set the info on dinode, chainlist
-        * and then the group */
-       status = ocfs2_journal_access_di(handle,
-                                        INODE_CACHE(alloc_inode),
-                                        ac->ac_bh,
-                                        OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
+       status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+                                                 ac->ac_bh, res->sr_bits,
+                                                 chain);
+       if (status) {
                mlog_errno(status);
                goto bail;
        }
 
-       tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
-       fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
-       le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
-       ocfs2_journal_dirty(handle, ac->ac_bh);
-
        status = ocfs2_block_group_set_bits(handle,
                                            alloc_inode,
                                            bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
             (unsigned long long)le64_to_cpu(fe->i_blkno));
 
+out_loc_only:
        *bits_left = le16_to_cpu(bg->bg_free_bits_count);
 bail:
        brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
        int status;
        u16 victim, i;
        u16 bits_left = 0;
+       u64 hint = ac->ac_last_group;
        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;
 
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                goto bail;
        }
 
-       res->sr_bg_blkno = ac->ac_last_group;
+       res->sr_bg_blkno = hint;
        if (res->sr_bg_blkno) {
                /* Attempt to short-circuit the usual search mechanism
                 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
-       if (!status)
+       if (!status) {
+               hint = ocfs2_group_from_res(res);
                goto set_hint;
+       }
        if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
                goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                ac->ac_chain = i;
                status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            res, &bits_left);
-               if (!status)
+               if (!status) {
+                       hint = ocfs2_group_from_res(res);
                        break;
+               }
                if (status < 0 && status != -ENOSPC) {
                        mlog_errno(status);
                        goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
                if (bits_left < min_bits)
                        ac->ac_last_group = 0;
                else
-                       ac->ac_last_group = res->sr_bg_blkno;
+                       ac->ac_last_group = hint;
        }
 
 bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
        OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
 }
 
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno)
+{
+       int ret;
+       handle_t *handle = NULL;
+       struct ocfs2_suballoc_result *res;
+
+       BUG_ON(!ac);
+       BUG_ON(ac->ac_bits_given != 0);
+       BUG_ON(ac->ac_bits_wanted != 1);
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+       res = kzalloc(sizeof(*res), GFP_NOFS);
+       if (res == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+       /*
+        * The handle started here is for chain relink. Alternatively,
+        * we could just disable relink for these calls.
+        */
+       handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * This will instruct ocfs2_claim_suballoc_bits and
+        * ocfs2_search_one_group to search but save actual allocation
+        * for later.
+        */
+       ac->ac_find_loc_only = 1;
+
+       ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac->ac_find_loc_priv = res;
+       *fe_blkno = res->sr_blkno;
+
+out:
+       if (handle)
+               ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+       if (ret)
+               kfree(res);
+
+       return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno)
+{
+       int ret;
+       u16 chain;
+       struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+       struct buffer_head *bg_bh = NULL;
+       struct ocfs2_group_desc *bg;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+       /*
+        * Since di_blkno is being passed back in, we check for any
+        * inconsistencies which may have happened between
+        * calls. These are code bugs as di_blkno is not expected to
+        * change once returned from ocfs2_find_new_inode_loc()
+        */
+       BUG_ON(res->sr_blkno != di_blkno);
+
+       ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+                                         res->sr_bg_stable_blkno, &bg_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       chain = le16_to_cpu(bg->bg_chain);
+
+       ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+                                              ac->ac_bh, res->sr_bits,
+                                              chain);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle,
+                                        ac->ac_inode,
+                                        bg,
+                                        bg_bh,
+                                        res->sr_bit_offset,
+                                        res->sr_bits);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+            (unsigned long long)di_blkno);
+
+       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+       BUG_ON(res->sr_bits != 1);
+
+       *suballoc_loc = res->sr_bg_blkno;
+       *suballoc_bit = res->sr_bit_offset;
+       ac->ac_bits_given++;
+       ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+       brelse(bg_bh);
+
+       return ret;
+}
+
 int ocfs2_claim_new_inode(handle_t *handle,
                          struct inode *dir,
                          struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
  * suballoc_bit.
  */
 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
-                                      u16 *suballoc_slot, u16 *suballoc_bit)
+                                      u16 *suballoc_slot, u64 *group_blkno,
+                                      u16 *suballoc_bit)
 {
        int status;
        struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
                *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
        if (suballoc_bit)
                *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+       if (group_blkno)
+               *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
 
 bail:
        brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
  */
 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                                   struct inode *suballoc,
-                                  struct buffer_head *alloc_bh, u64 blkno,
+                                  struct buffer_head *alloc_bh,
+                                  u64 group_blkno, u64 blkno,
                                   u16 bit, int *res)
 {
        struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                goto bail;
        }
 
-       if (alloc_di->i_suballoc_loc)
-               bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
-       else
-               bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+       bg_blkno = group_blkno ? group_blkno :
+                  ocfs2_which_suballoc_group(blkno, bit);
        status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
                                             &group_bh);
        if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
 {
        int status;
+       u64 group_blkno = 0;
        u16 suballoc_bit = 0, suballoc_slot = 0;
        struct inode *inode_alloc_inode;
        struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        mlog_entry("blkno: %llu", (unsigned long long)blkno);
 
        status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
-                                            &suballoc_bit);
+                                            &group_blkno, &suballoc_bit);
        if (status < 0) {
                mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
                goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        }
 
        status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
-                                        blkno, suballoc_bit, res);
+                                        group_blkno, blkno, suballoc_bit, res);
        if (status < 0)
                mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
 
index a017dd3ee7d9ce2d6c0429d090b57ac077ed585a..b8afabfeede4c43694bdb8bf0a9664b0befd24a6 100644 (file)
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
        u64    ac_max_block;  /* Highest block number to allocate. 0 is
                                 the same as ~0 - unlimited */
 
+       int    ac_find_loc_only;  /* hack for reflink operation ordering */
+       struct ocfs2_suballoc_result *ac_find_loc_priv; /* */
+
        struct ocfs2_alloc_reservation  *ac_resv;
 };
 
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
                          struct ocfs2_alloc_context **meta_ac);
 
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno);
+
 #endif /* _CHAINALLOC_H_ */
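
A sketch of how the two interfaces declared above are intended to be paired, mirroring the ocfs2_create_inode_in_orphan() hunks earlier in this diff; error paths and the orphan-dir bookkeeping are elided, and the wrapper function name is made up:

#include "suballoc.h"   /* assumed include for the prototypes above */

static int pick_then_claim_inode(handle_t *handle, struct inode *dir,
                                 struct buffer_head *parent_fe_bh,
                                 struct ocfs2_alloc_context *inode_ac,
                                 u64 *di_blkno)
{
        u64 suballoc_loc;
        u16 suballoc_bit;
        int ret;

        /* step 1: locate a free inode bit without claiming it, so the
         * block number is known before any orphan dirent is created */
        ret = ocfs2_find_new_inode_loc(dir, parent_fe_bh, inode_ac, di_blkno);
        if (ret)
                return ret;

        /* ... the caller derives the orphan name from *di_blkno here ... */

        /* step 2: inside the journal handle, claim exactly that location */
        return ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
                                            &suballoc_loc, &suballoc_bit,
                                            *di_blkno);
}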
index 32499d213fc4f80f37efde6f71196283236896bf..9975457c981f904ca18a512dd7bcbe0f092ac664 100644 (file)
@@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
        }
 
        /* Fast symlinks can't be large */
-       len = strlen(target);
+       len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
        link = kzalloc(len + 1, GFP_NOFS);
        if (!link) {
                status = -ENOMEM;
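
The strnlen() change above keeps the length probe inside the on-disk symlink area even if the stored target is not NUL-terminated. The same bounded-copy pattern in isolation, as plain userspace C with made-up names:

#include <stdlib.h>
#include <string.h>

/* Duplicate at most 'max' bytes of a possibly unterminated target. */
char *dup_symlink_target(const char *target, size_t max)
{
        size_t len = strnlen(target, max);      /* never scans past 'max' */
        char *link = calloc(1, len + 1);        /* room for the trailing NUL */

        if (link)
                memcpy(link, target, len);
        return link;
}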
index d03469f618012ea4aff64b62f1f77e3e8b8ec47d..06fa5e77c40ef7aa0a3c0483ddbf3b9dd59f9dcf 100644 (file)
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
        xis.inode_bh = xbs.inode_bh = di_bh;
        di = (struct ocfs2_dinode *)di_bh->b_data;
 
-       down_read(&oi->ip_xattr_sem);
        ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
                                    buffer_size, &xis);
        if (ret == -ENODATA && di->i_xattr_loc)
                ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
                                            buffer_size, &xbs);
-       up_read(&oi->ip_xattr_sem);
 
        return ret;
 }
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
                mlog_errno(ret);
                return ret;
        }
+       down_read(&OCFS2_I(inode)->ip_xattr_sem);
        ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
                                     name, buffer, buffer_size);
+       up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 0);
 
index a1c43e7c8a7be4ce70c729f3338eae09097f4025..8e4addaa542458badbe0e62da644ec1c64194f99 100644 (file)
@@ -2675,7 +2675,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        INF("auxv",       S_IRUSR, proc_pid_auxv),
        ONE("status",     S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",     S_IRUSR, proc_pid_limits),
+       INF("limits",     S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
@@ -3011,7 +3011,7 @@ static const struct pid_entry tid_base_stuff[] = {
        INF("auxv",      S_IRUSR, proc_pid_auxv),
        ONE("status",    S_IRUGO, proc_pid_status),
        ONE("personality", S_IRUSR, proc_pid_personality),
-       INF("limits",    S_IRUSR, proc_pid_limits),
+       INF("limits",    S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
index 180cf5a0bd67119218c170b265cf944c103b889b..3b8b456603318f1ff017056cdeda40129d559ab4 100644 (file)
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
 
index 439fc1f1c1c41487ad76d23523d995a9f416b926..1dbca4e8cc164d08276d4a1f5a875e7726fc7020 100644 (file)
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
-               start += PAGE_SIZE;
+               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+                       start += PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
@@ -362,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        mss->referenced += PAGE_SIZE;
                mapcount = page_mapcount(page);
                if (mapcount >= 2) {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
-                       if (pte_dirty(ptent))
+                       if (pte_dirty(ptent) || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
index 91c817ff02c3847a874e1e99f7638b1148869a77..2367fb3f70bc6ba468ab0ccb628b5b88fabac955 100644 (file)
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
 
 static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
-       .llseek         = generic_file_llseek,
+       .llseek         = default_llseek,
 };
 
 static struct vmcore* __init get_new_element(void)
index f53505de071217399e39bf2013304ba46bd0c7f5..5cbb81e134aca031b21e3c88661400ff1c1b174f 100644 (file)
@@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
        int retval = 0;
+       int depth;
        int index;
        struct page *page;
        struct address_space *mapping;
@@ -188,8 +189,8 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        /* we need to make sure nobody is changing the file size beneath
         ** us
         */
-       mutex_lock(&inode->i_mutex);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+       depth = reiserfs_write_lock_once(inode->i_sb);
 
        write_from = inode->i_size & (blocksize - 1);
        /* if we are on a block boundary, we are already unpacked.  */
@@ -224,6 +225,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 
       out:
        mutex_unlock(&inode->i_mutex);
-       reiserfs_write_unlock(inode->i_sb);
+       reiserfs_write_unlock_once(inode->i_sb, depth);
        return retval;
 }
index d72cf2bb054a54bd8fcfbd7dc75f582ad2d29591..286e36e21dae587672216d7b7722488e91b45758 100644 (file)
@@ -1932,7 +1932,8 @@ xfs_buf_init(void)
        if (!xfs_buf_zone)
                goto out;
 
-       xfslogd_workqueue = create_workqueue("xfslogd");
+       xfslogd_workqueue = alloc_workqueue("xfslogd",
+                                       WQ_RESCUER | WQ_HIGHPRI, 1);
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;
 
index 4fec427b83efc9dbf42ae9cda3aa64c5c71cdbec..3b9e626f7cd1562877cc340d0c43c5a776dd2e35 100644 (file)
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr(
 {
        struct fsxattr          fa;
 
+       memset(&fa, 0, sizeof(struct fsxattr));
+
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        fa.fsx_xflags = xfs_ip2xflags(ip);
        fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
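
The memset() added above is the usual defence against leaking kernel stack bytes through struct padding or fields that are never assigned before the structure is copied out to userspace. A generic sketch of the pattern; the struct and function names are illustrative, not XFS code:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_attr {
        __u32 flags;
        __u32 pad;              /* would otherwise carry stack garbage */
        __u64 size;
};

static long example_fill(struct example_attr __user *dst,
                         __u32 flags, __u64 size)
{
        struct example_attr a;

        memset(&a, 0, sizeof(a));       /* clear padding and unset fields */
        a.flags = flags;
        a.size = size;

        return copy_to_user(dst, &a, sizeof(a)) ? -EFAULT : 0;
}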
index ed575fb4b49597806200f676680ff787d5be9d12..7e206fc1fa362ed4bb761a8f5ad11ef39c2dd5c0 100644 (file)
@@ -405,9 +405,15 @@ xlog_cil_push(
        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
-       /* lock out transaction commit, but don't block on background push */
+       /*
+        * Lock out transaction commit, but don't block for background pushes
+        * unless we are well over the CIL space limit. See the definition of
+        * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
+        * used here.
+        */
        if (!down_write_trylock(&cil->xc_ctx_lock)) {
-               if (!push_seq)
+               if (!push_seq &&
+                   cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
                        goto out_free_ticket;
                down_write(&cil->xc_ctx_lock);
        }
@@ -422,7 +428,7 @@ xlog_cil_push(
                goto out_skip;
 
        /* check for a previously pushed sequence */
-       if (push_seq < cil->xc_ctx->sequence)
+       if (push_seq && push_seq < cil->xc_ctx->sequence)
                goto out_skip;
 
        /*
index ced52b98b322e3eb1be0e0c7dfc70f6096d0cd80..edcdfe01617f673bc1047caca3acc243f66a0658 100644 (file)
@@ -426,13 +426,13 @@ struct xfs_cil {
 };
 
 /*
- * The amount of log space we should the CIL to aggregate is difficult to size.
- * Whatever we chose we have to make we can get a reservation for the log space
- * effectively, that it is large enough to capture sufficient relogging to
- * reduce log buffer IO significantly, but it is not too large for the log or
- * induces too much latency when writing out through the iclogs. We track both
- * space consumed and the number of vectors in the checkpoint context, so we
- * need to decide which to use for limiting.
+ * The amount of log space we allow the CIL to aggregate is difficult to size.
+ * Whatever we choose, we have to make sure we can get a reservation for the
+ * log space effectively, that it is large enough to capture sufficient
+ * relogging to reduce log buffer IO significantly, but it is not too large for
+ * the log or induces too much latency when writing out through the iclogs. We
+ * track both space consumed and the number of vectors in the checkpoint
+ * context, so we need to decide which to use for limiting.
  *
  * Every log buffer we write out during a push needs a header reserved, which
  * is at least one sector and more for v2 logs. Hence we need a reservation of
@@ -459,16 +459,21 @@ struct xfs_cil {
  * checkpoint transaction ticket is specific to the checkpoint context, rather
  * than the CIL itself.
  *
- * With dynamic reservations, we can basically make up arbitrary limits for the
- * checkpoint size so long as they don't violate any other size rules.  Hence
- * the initial maximum size for the checkpoint transaction will be set to a
- * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit
- * right now based on the latency of writing out a large amount of data through
- * the circular iclog buffers.
+ * With dynamic reservations, we can effectively make up arbitrary limits for
+ * the checkpoint size so long as they don't violate any other size rules.
+ * Recovery imposes a rule that no transaction exceed half the log, so we are
+ * limited by that.  Furthermore, the log transaction reservation subsystem
+ * tries to keep 25% of the log free, so we need to keep below that limit or we
+ * risk running out of free log space to start any new transactions.
+ *
+ * In order to keep background CIL push efficient, we will set a lower
+ * threshold at which background pushing is attempted without blocking current
+ * transaction commits.  A separate, higher bound defines when CIL pushes are
+ * enforced to ensure we stay within our maximum checkpoint size bounds, yet
+ * still give us plenty of space for aggregation on large logs.
  */
-
-#define XLOG_CIL_SPACE_LIMIT(log)      \
-       (min((log->l_logsize >> 2), (8 * 1024 * 1024)))
+#define XLOG_CIL_SPACE_LIMIT(log)      (log->l_logsize >> 3)
+#define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4))
 
 /*
  * The reservation head lsn is not made up of a cycle number and block number.
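
For a concrete feel for the two macros above: on a 128 MiB log the background push threshold is 1/8th of the log (16 MiB) and the hard, blocking limit is 3/16ths (24 MiB). A trivial check in userspace C, with l_logsize picked arbitrarily:

#include <stdio.h>

int main(void)
{
        unsigned long l_logsize = 128UL << 20;          /* 128 MiB log */
        unsigned long soft = l_logsize >> 3;            /* XLOG_CIL_SPACE_LIMIT */
        unsigned long hard = 3 * (l_logsize >> 4);      /* XLOG_CIL_HARD_SPACE_LIMIT */

        printf("soft limit: %lu MiB, hard limit: %lu MiB\n",
               soft >> 20, hard >> 20);
        return 0;
}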
index c0786d446a00b88adf144ab97f05af06679cccf7..984cdc62e30bc52da4cef907f3dd5094aa5ffd71 100644 (file)
@@ -55,7 +55,7 @@
 extern u8 acpi_gbl_permanent_mmap;
 
 /*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
  * run time configuration
  */
 extern u32 acpi_dbg_level;
index c7376bf80b0604bf8f8b9394989178d6de45ecc2..8ca18e26d7e39fe429a8179d48f2f9f17f58a589 100644 (file)
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS          256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-       /* only some non-negative numbers are valid */
        return ((unsigned)number) < ARCH_NR_GPIOS;
 }
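
As the comments above note, an invalid (for example negative) GPIO number is a convenient "not wired up" marker in platform data. A small hedged sketch; the structure and field names are made up:

#include <linux/gpio.h>

struct board_pdata {
        int reset_gpio;                 /* -1 means no reset line wired */
};

static int maybe_request_reset(const struct board_pdata *pd)
{
        if (!gpio_is_valid(pd->reset_gpio))
                return 0;               /* board has no reset GPIO */

        return gpio_request(pd->reset_gpio, "reset");
}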
 
index 7809d230adee3f90c9537f6c00ec66bb53c1bd27..4c9461a4f9e67b4b3e67c5bb73192aed44695079 100644 (file)
@@ -612,7 +612,7 @@ struct drm_gem_object {
        struct kref refcount;
 
        /** Handle count of this object. Each handle also holds a reference */
-       struct kref handlecount;
+       atomic_t handle_count; /* number of handles on this object */
 
        /** Related drm device */
        struct drm_device *dev;
@@ -808,7 +808,6 @@ struct drm_driver {
         */
        int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
-       void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
        /* vga arb irq handler */
        void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (obj != NULL)
-               kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+       if (obj != NULL) {
+               struct drm_device *dev = obj->dev;
+               mutex_lock(&dev->struct_mutex);
+               kref_put(&obj->refcount, drm_gem_object_free);
+               mutex_unlock(&dev->struct_mutex);
+       }
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1495,7 +1498,7 @@ static inline void
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
        drm_gem_object_reference(obj);
-       kref_get(&obj->handlecount);
+       atomic_inc(&obj->handle_count);
 }
 
 static inline void
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference(obj);
 }
 
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
        if (obj == NULL)
                return;
 
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
+
        /*
        * Must bump handle count first as this may be the last
        * ref, in which case the object would disappear before we
        * checked for a name
        */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference_unlocked(obj);
 }
 
index c9f3cc5949a82eb0ae5ba4b463891df5f96806dd..3e5a51af757c76ba07f514d00265a5e7300b6627 100644 (file)
@@ -386,7 +386,15 @@ struct drm_connector_funcs {
        void (*dpms)(struct drm_connector *connector, int mode);
        void (*save)(struct drm_connector *connector);
        void (*restore)(struct drm_connector *connector);
-       enum drm_connector_status (*detect)(struct drm_connector *connector);
+
+       /* Check to see if anything is attached to the connector.
+        * @force is set to false whilst polling, true when checking the
+        * connector due to user request. @force can be used by the driver
+        * to avoid expensive, destructive operations during automated
+        * probing.
+        */
+       enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                           bool force);
        int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
        int (*set_property)(struct drm_connector *connector, struct drm_property *property,
                             uint64_t val);
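A sketch of how a driver-side detect() callback might honour the new @force argument; the mydrv_* helpers and their load-detection logic are invented here, only the callback signature and the connector status values come from the header:

static enum drm_connector_status
mydrv_connector_detect(struct drm_connector *connector, bool force)
{
	/*
	 * While being polled (force == false), skip the expensive and
	 * potentially visible load-detection pass and report the last
	 * known state instead.
	 */
	if (!force && mydrv_needs_load_detect(connector))
		return connector->status;

	return mydrv_probe_output(connector) ?
		connector_status_connected : connector_status_disconnected;
}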
index 3a9940ef728bb5d2412c4cb54d84ed746e15d870..883c1d4398996d8ba807ca5d54940cec28c7809e 100644 (file)
@@ -85,7 +85,6 @@
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
-       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
        {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
index 5a53d8f039a2934ab02abcf19b312aee80b88558..0c991023ee475fad85136a04b00cb8d2699a382b 100644 (file)
@@ -579,6 +579,7 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
 static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
 {
        return cgroup_attach_task_all(current, tsk);
index 9ddc8780e8db7bfe45496587bc47e5fb00d12073..5778b559d59c3222ee825897b7430523aa96c353 100644 (file)
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
                const struct compat_iovec __user *uvector, unsigned long nr_segs,
                unsigned long fast_segs, struct iovec *fast_pointer,
                struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 36ca9721a0c28a0150b05153b4f442d2c558d6d4..1be416bbbb82540802a0a742ba2c22934a9a6659 100644 (file)
@@ -53,6 +53,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_BALANCED  (0x40) /* medium latency, moderate savings */
 #define CPUIDLE_FLAG_DEEP      (0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE    (0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
index ce29b8151198b6c0d40b8f420efe9c2c920d9b4f..ba8319ae5fcc3e75edc0d52240da0b16e18da68d 100644 (file)
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
        return DMA_BIT_MASK(32);
 }
 
+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
        dev->coherent_dma_mask = mask;
        return 0;
 }
+#endif
 
 extern u64 dma_get_required_mask(struct device *dev);
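With this #ifdef, an architecture can replace the generic inline by defining ARCH_HAS_DMA_SET_COHERENT_MASK in its asm/dma-mapping.h and supplying an out-of-line version. A rough sketch of such an override; the bridge check is a made-up placeholder, not code from this series:

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (device_sits_behind_narrow_bridge(dev))	/* hypothetical check */
		mask &= DMA_BIT_MASK(32);

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);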
 
index c61d4ca27bcc26906699101b4f8eee7ce8e8525b..e2106495cc11383ad9a14d7ac119400b18a8f83a 100644 (file)
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
        return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }
 
-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
        return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }
index 2c958f4fce1ed6f1d6f4a8c3fc865155cd1eac81..926b50322a469c9d16c48ed35e10c4c12bd695f3 100644 (file)
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
index 76041b6147582ef62eb0daedafbf1771a8e844c6..63d069bd80b702c392d4052bf2be937f90763692 100644 (file)
@@ -1093,6 +1093,10 @@ struct file_lock {
 
 #include <linux/fcntl.h>
 
+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 #ifdef CONFIG_FILE_LOCKING
index 03f616b78cfa8b857e727ea220a60c05a87bb89f..e41f7dd1ae676eb778a00c0f7cdced047312eff7 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 
 struct device;
+struct gpio_chip;
 
 /*
  * Some platforms don't support the GPIO programming interface.
index ee3049cb9ba5782e27447d3984a914a2d39ae87b..52baa79d69a763f7f94f728b115fe0337190addd 100644 (file)
@@ -63,6 +63,9 @@
  *            IRQ lines will appear.  Similarly to gpio_base, the expander
  *            will create a block of irqs beginning at this number.
  *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
        unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
        u16      io_polarity;
        int      irq_summary;
        unsigned irq_base;
+       bool     reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
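A board file might request the new behaviour like this; the board name and pin numbers are invented, only the field names come from the struct above:

static struct sx150x_platform_data myboard_sx150x_pdata = {
	.gpio_base		= 200,
	.irq_summary		= -1,		/* no summary IRQ wired up */
	.reset_during_probe	= true,		/* start from a known state */
};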
index 0a6b3d5c490ccfcd3ab9a3dbdba9b41f88e1c4b8..7fb59279373823339f6fdda86158952e3e6fac45 100644 (file)
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
        iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
        resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
        iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-       return (struct io_mapping *) ioremap_wc(base, size);
+       return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-       iounmap(mapping);
+       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
 
index 4aa95f203f3ee773a6ab4bbdbb632efaf970785e..62dbee554f608c91fe7b2b3bf01f390260821c52 100644 (file)
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)  \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpl = (fifo); \
+       typeof((fifo) + 1) __tmpl = (fifo); \
        __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_empty(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_full(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define        kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
        unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
        (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_skip(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__is_kfifo_ptr(__tmp)) \
                __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_put(fifo, val) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_in(fifo, buf, n) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo);  \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
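The extra parentheses are standard macro hygiene: the argument may be an arbitrary expression, and without them the trailing "+ 1" can regroup with only part of it. A contrived but compilable userspace illustration (GNU C typeof, names invented), not a bug any particular in-tree caller is known to have hit:

#include <stdio.h>

#define DECLARE_TMP_BAD(x)	typeof(x + 1) tmp_bad = (x)
#define DECLARE_TMP_GOOD(x)	typeof((x) + 1) tmp_good = (x)

int main(void)
{
	char c = 0, d = 0;

	/*
	 * Passing an assignment expression: typeof(c = d + 1) is char,
	 * because the "+ 1" attached itself to d alone, while
	 * typeof((c = d) + 1) is int after the usual promotions.
	 */
	DECLARE_TMP_BAD(c = d);
	DECLARE_TMP_GOOD(c = d);

	printf("bad: %zu byte(s), good: %zu byte(s)\n",
	       sizeof(tmp_bad), sizeof(tmp_good));
	return 0;
}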
index 74d691ee9121c5bb3aa8336d7eeffcc03d88cc46..3319a6967626e02f91c340080b21950a8310a67f 100644 (file)
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = page_anon_vma(page);
 
-       if (!anon_vma ||
-           (anon_vma->root == vma->anon_vma->root &&
-            page->index == linear_page_index(vma, address)))
-               return page;
-
-       return ksm_does_need_to_copy(page, vma, address);
+       return anon_vma &&
+               (anon_vma->root != vma->anon_vma->root ||
+                page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
-       return page;
+       return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644 (file)
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
index f010f18a0f863f39e139d65469debae94232b259..45fb2967b66d6949094289a467ae120d12014c0d 100644 (file)
@@ -335,6 +335,7 @@ enum {
        ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
        ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
        ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
+       ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
        ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
        ATA_EHI_DID_HARDRESET   = (1 << 17), /* already hard-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
        struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */
        u8                      ctl;    /* cache of ATA control register */
        u8                      last_ctl;       /* Cache last written value */
+       struct ata_link*        sff_pio_task_link; /* link currently used */
        struct delayed_work     sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
        struct ata_bmdma_prd    *bmdma_prd;     /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                            u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
index e6b1210772ceace3fc70817a32e1a411096a6b22..74949fbef8c608b9c5ef6dab3b508041a11243f3 100644 (file)
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
index 329a8faa6e37bb32bd6e65f1b8fd758ac45baa36..245cdacee5443791eb5418d10bde3daa190351a9 100644 (file)
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
index 6e6e62648a4d4a6d792fe207d42105563b112aaa..3984c4eb41fdc9c85759dbffe308df5a806391f4 100644 (file)
@@ -283,6 +283,13 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
+       /*
+        * When free pages are below this point, additional steps are taken
+        * when reading the number of free pages to avoid per-cpu counter
+        * drift allowing watermarks to be breached
+        */
+       unsigned long percpu_drift_mark;
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
index 8a6b9fdc7ffae0c20335b8a2c73726396a934ee2..aace066bad8f067758cda4d341b9ce8941d2ae1c 100644 (file)
@@ -686,17 +686,16 @@ extern int module_sysfs_initialized;
 
 
 #ifdef CONFIG_GENERIC_BUG
-int  module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
                         struct module *);
 void module_bug_cleanup(struct module *);
 
 #else  /* !CONFIG_GENERIC_BUG */
 
-static inline int  module_bug_finalize(const Elf_Ehdr *hdr,
+static inline void module_bug_finalize(const Elf_Ehdr *hdr,
                                        const Elf_Shdr *sechdrs,
                                        struct module *mod)
 {
-       return 0;
 }
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif /* CONFIG_GENERIC_BUG */
index 878cab4f5fcc5db95585184c22aea5d905a34295..f363bc8fdc74c821c99aa59d5bfcb9554c012c9a 100644 (file)
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                   \
        static struct lock_class_key __key;             \
index 10d33309e9a61351aa4d3597540f201c97dc9294..570fddeb0388f62b7b8dd0a32ec5136123856033 100644 (file)
 #define PCI_DEVICE_ID_VLSI_82C147      0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011    0x0702
 
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU      0x5a23
+
 #define PCI_VENDOR_ID_ADL              0x1005
 #define PCI_DEVICE_ID_ADL_2301         0x2301
 
index d50ba858cfe0c15325ff5fcf410d7a18f1a8aad3..d1a9193960f17601d4637bfae03f3c9a3ac5bc83 100644 (file)
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
        int ret;
 
        ret = dquot_alloc_space_nodirty(inode, nr);
-       if (!ret)
-               mark_inode_dirty_sync(inode);
+       if (!ret) {
+               /*
+                * Mark inode fully dirty. Since we are allocating blocks, inode
+                * would become fully dirty soon anyway and it reportedly
+                * reduces inode_lock contention.
+                */
+               mark_inode_dirty(inode);
+       }
        return ret;
 }
 
index 9fbc54a2585d42cb9276adf2c2d168f53e883f63..83af1f8d8b746cde3b8369d7125f3cdfe07da6d3 100644 (file)
@@ -454,7 +454,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * Makes rcu_dereference_check() do the dirty work.
  */
 #define rcu_dereference_bh(p) \
-               rcu_dereference_check(p, rcu_read_lock_bh_held())
+               rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled())
 
 /**
  * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
index 7415839ac890f538b88611f7843b5b770e73292f..5310d27abd2a503ad523059ea4832f34ecfbb194 100644 (file)
@@ -26,6 +26,9 @@ struct semaphore {
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
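The new name makes it clearer that this defines a counting semaphore initialised to 1 rather than a mutex. A short usage sketch with an invented driver name; the semaphore calls themselves are unchanged:

static DEFINE_SEMAPHORE(mydrv_sem);	/* what DECLARE_MUTEX(mydrv_sem) used to spell */

static int mydrv_do_io(void)
{
	if (down_interruptible(&mydrv_sem))
		return -ERESTARTSYS;
	/* ... exclusive section ... */
	up(&mydrv_sem);
	return 0;
}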
 
index cc813f95a2f2b6a4ddf19d04ed1b60fc321c9712..c91302f3a25789b8045b34d9c0a9fa814c29b0ae 100644 (file)
@@ -14,7 +14,9 @@
 #define SPI_MODE_OFFSET                        6
 #define SPI_SCPH_OFFSET                        6
 #define SPI_SCOL_OFFSET                        7
+
 #define SPI_TMOD_OFFSET                        8
+#define SPI_TMOD_MASK                  (0x3 << SPI_TMOD_OFFSET)
 #define        SPI_TMOD_TR                     0x0             /* xmit & recv */
 #define SPI_TMOD_TO                    0x1             /* xmit only */
 #define SPI_TMOD_RO                    0x2             /* recv only */
index 569dc722a600d55834055cf22474858ef69f5bae..85f38a63f098a2c55989f1e5d4e070595e525a99 100644 (file)
@@ -30,7 +30,7 @@ struct rpc_inode;
  * The high-level client handle
  */
 struct rpc_clnt {
-       struct kref             cl_kref;        /* Number of references */
+       atomic_t                cl_count;       /* Number of references */
        struct list_head        cl_clients;     /* Global list of clients */
        struct list_head        cl_tasks;       /* List of tasks */
        spinlock_t              cl_lock;        /* spinlock */
index 2fee51a11b7399aea7ea7427ae32777b45cf8903..7cdd63366f883a164a7f5d5b74ff8882c62b4f8c 100644 (file)
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
+#define SWAP_FLAG_DISCARD      0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-       SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+       SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
index 7f43ccdc1d38c0eb919efe4891e1b91ec19c3705..eaaea37b3b75dd64b73a34a0e3beb31417bdd0d6 100644 (file)
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+{
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
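Together with percpu_drift_mark in struct zone and the zone_nr_free_pages() declaration earlier in this series, this lets the allocator pay for the per-cpu walk only when free memory is low. A rough sketch of how zone_nr_free_pages() can be built on the snapshot helper; the real body lives in mm/vmstat.c and also factors in kswapd state:

unsigned long zone_nr_free_pages(struct zone *zone)
{
	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

	/*
	 * Only take the slow, more accurate path when the cheap counter
	 * has dropped below the drift mark, i.e. when stale per-cpu
	 * deltas could make a breached watermark look intact.
	 */
	if (nr_free_pages < zone->percpu_drift_mark)
		nr_free_pages = zone_page_state_snapshot(zone, NR_FREE_PAGES);

	return nr_free_pages;
}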
 
index 0836ccc5712146f87d13e9e35a9480e980708392..3efc9f3f43a0862cad51aa325e08c9f24d129f8e 100644 (file)
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
+               (wait)->flags = 0;                                      \
        } while (0)
 
 /**
index f11100f964824c250b4eacb21e96e36640527ac6..25e02c941bac34a5322510f0461998b1054259fc 100644 (file)
@@ -235,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
+/*
+ * Workqueue flags and constants.  For details, please refer to
+ * Documentation/workqueue.txt.
+ */
 enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
index 40a8f462a8224b298690cb07892f93afe8c15214..0e0d49bbb867f239be5690968227c53e7c0226c0 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -743,6 +743,8 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
            {
                struct semid_ds out;
 
+               memset(&out, 0, sizeof(out));
+
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
 
                out.sem_otime   = in->sem_otime;
index ed19afd9e3fe3f74eacada4c6b79deca210a689c..c9483d8f6140ed6cb4e06fa6139e2aeb907b3d47 100644 (file)
@@ -1798,13 +1798,13 @@ out:
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
-       struct cgroup *cur_cg;
        int retval = 0;
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(from, root);
-               retval = cgroup_attach_task(cur_cg, tsk);
+               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+               retval = cgroup_attach_task(from_cg, tsk);
                if (retval)
                        break;
        }
index e167efce8423e2cdaf2709412c36cc3ae642c90f..c9e2ec0b34a8cc38cda604273618a22c8d2d928c 100644 (file)
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
        return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+       void __user *ptr;
+
+       /* If len would occupy more than half of the entire compat space... */
+       if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+               return NULL;
+
+       ptr = arch_compat_alloc_user_space(len);
+
+       if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+               return NULL;
+
+       return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
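For context, a hedged sketch of the kind of caller this helper exists for: a compat entry point that widens a 32-bit structure into the native layout in user space before handing off to the regular syscall path. The struct layouts and sys_do_thing() are invented; only compat_alloc_user_space(), get_user() and put_user() are real interfaces:

struct compat_thing { compat_ulong_t val; };
struct thing        { unsigned long  val; };

asmlinkage long compat_sys_do_thing(struct compat_thing __user *uthing32)
{
	struct thing __user *uthing;
	compat_ulong_t val;

	uthing = compat_alloc_user_space(sizeof(*uthing));
	if (!uthing)
		return -EFAULT;

	if (get_user(val, &uthing32->val) ||
	    put_user(val, &uthing->val))
		return -EFAULT;

	return sys_do_thing(uthing);	/* hypothetical native syscall */
}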
index 75bd9b3ebbb7cf501800115a531e86e7c30c5a28..20059ef4459a4ff293428337d9936ff438a8eb96 100644 (file)
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
        int i, bpno;
        kdb_bp_t *bp, *bp_check;
        int diag;
-       int free;
        char *symname = NULL;
        long offset = 0ul;
        int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
        /*
         * Find an empty bp structure to allocate
         */
-       free = KDB_MAXBPT;
        for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                if (bp->bp_free)
                        break;
index b7e9d60a675d3a08a1096ce725380229217d9fa3..c445f8cc408d777dd7a94aec3fa78c07e1d98b98 100644 (file)
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
+               tmp->vm_mm = mm;
                if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
-               tmp->vm_mm = mm;
                tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
index ef3c3f88a7a35e36d8f1fb551c56e534cea52687..f83972b16564d00676154900f09d3c70affd773c 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *   files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *   object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info *info;
-       struct gcov_info *ghost;
+       struct gcov_info **loaded_info;
+       struct gcov_info *unloaded_info;
        struct dentry *dentry;
        struct dentry **links;
+       int num_loaded;
        char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->info)
-               return node->info;
+       if (node->num_loaded > 0)
+               return node->loaded_info[0];
 
-       return node->ghost;
+       return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+       struct gcov_info *info;
+       int i = 0;
+
+       if (node->unloaded_info)
+               info = gcov_info_dup(node->unloaded_info);
+       else
+               info = gcov_info_dup(node->loaded_info[i++]);
+       if (!info)
+               return NULL;
+       for (; i < node->num_loaded; i++)
+               gcov_info_add(info, node->loaded_info[i]);
+
+       return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access.
+        * complexity and concurrent access and to keep accumulating multiple
+        * profiling data sets associated with one node simple.
         */
-       info = gcov_info_dup(get_node_info(node));
+       info = get_accumulated_info(node);
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+       int i;
+
+       if (node->unloaded_info)
+               gcov_info_reset(node->unloaded_info);
+       for (i = 0; i < node->num_loaded; i++)
+               gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->ghost)
+               if (node->num_loaded == 0)
                        remove_node(node);
                else
-                       gcov_info_reset(node->info);
+                       reset_node(node);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       node->info = info;
+       if (node->loaded_info) {
+               node->loaded_info[0] = info;
+               node->num_loaded = 1;
+       }
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node) {
-               pr_warning("out of memory\n");
-               return NULL;
+       if (!node)
+               goto err_nomem;
+       if (info) {
+               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                          GFP_KERNEL);
+               if (!node->loaded_info)
+                       goto err_nomem;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
+
+err_nomem:
+       kfree(node);
+       pr_warning("out of memory\n");
+       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       if (node->ghost)
-               gcov_info_free(node->ghost);
+       kfree(node->loaded_info);
+       if (node->unloaded_info)
+               gcov_info_free(node->unloaded_info);
        kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->info)
-                       gcov_info_reset(node->info);
+               if (node->num_loaded > 0)
+                       reset_node(node);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-       node->ghost = gcov_info_dup(node->info);
-       if (!node->ghost) {
-               pr_warning("could not save data for '%s' (out of memory)\n",
-                          node->info->filename);
-               return -ENOMEM;
+       struct gcov_info **loaded_info;
+       int num = node->num_loaded;
+
+       /*
+        * Prepare new array. This is done first to simplify cleanup in
+        * case the new data set is incompatible, the node only contains
+        * unloaded data sets and there's not enough memory for the array.
+        */
+       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+       if (!loaded_info) {
+               pr_warning("could not add '%s' (out of memory)\n",
+                          info->filename);
+               return;
+       }
+       memcpy(loaded_info, node->loaded_info,
+              num * sizeof(struct gcov_info *));
+       loaded_info[num] = info;
+       /* Check if the new data set is compatible. */
+       if (num == 0) {
+               /*
+                * A module was unloaded, modified and reloaded. The new
+                * data set replaces the copy of the last one.
+                */
+               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                       pr_warning("discarding saved data for %s "
+                                  "(incompatible version)\n", info->filename);
+                       gcov_info_free(node->unloaded_info);
+                       node->unloaded_info = NULL;
+               }
+       } else {
+               /*
+                * Two different versions of the same object file are loaded.
+                * The initial one takes precedence.
+                */
+               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                       pr_warning("could not add '%s' (incompatible "
+                                  "version)\n", info->filename);
+                       kfree(loaded_info);
+                       return;
+               }
        }
-       node->info = NULL;
+       /* Overwrite previous array. */
+       kfree(node->loaded_info);
+       node->loaded_info = loaded_info;
+       node->num_loaded = num + 1;
+}
 
-       return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       for (i = 0; i < node->num_loaded; i++) {
+               if (node->loaded_info[i] == info)
+                       return i;
+       }
+       return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-       if (gcov_info_is_compatible(node->ghost, info))
-               gcov_info_add(info, node->ghost);
+       if (node->unloaded_info)
+               gcov_info_add(node->unloaded_info, info);
        else {
-               pr_warning("discarding saved data for '%s' (version changed)\n",
+               node->unloaded_info = gcov_info_dup(info);
+               if (!node->unloaded_info) {
+                       pr_warning("could not save data for '%s' "
+                                  "(out of memory)\n", info->filename);
+               }
+       }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       i = get_info_index(node, info);
+       if (i < 0) {
+               pr_warning("could not remove '%s' (not found)\n",
                           info->filename);
+               return;
        }
-       gcov_info_free(node->ghost);
-       node->ghost = NULL;
-       node->info = info;
+       if (gcov_persist)
+               save_info(node, info);
+       /* Shrink array. */
+       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+       node->num_loaded--;
+       if (node->num_loaded > 0)
+               return;
+       /* Last loaded data set was removed. */
+       kfree(node->loaded_info);
+       node->loaded_info = NULL;
+       node->num_loaded = 0;
+       if (!node->unloaded_info)
+               remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               /* Add new node or revive ghost. */
-               if (!node) {
+               if (node)
+                       add_info(node, info);
+               else
                        add_node(info);
-                       break;
-               }
-               if (gcov_persist)
-                       revive_node(node, info);
-               else {
-                       pr_warning("could not add '%s' (already exists)\n",
-                                  info->filename);
-               }
                break;
        case GCOV_REMOVE:
-               /* Remove node or turn into ghost. */
-               if (!node) {
+               if (node)
+                       remove_info(node, info);
+               else {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
-                       break;
                }
-               if (gcov_persist) {
-                       if (!ghost_node(node))
-                               break;
-               }
-               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
index 53b1916c94926c245aacd17ccb420fc00d06a19b..253dc0f35cf4c30786d1ff3565427d1374762a04 100644 (file)
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
+               if (grp > GROUP_AT(group_info, mid))
                        left = mid + 1;
-               else if (cmp < 0)
+               else if (grp < GROUP_AT(group_info, mid))
                        right = mid;
                else
                        return 1;
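The subtraction was dropped because gid_t is unsigned: once two gids differ by more than INT_MAX, the wrapped difference reinterpreted as int has the wrong sign and steers the binary search into the wrong half. A tiny userspace illustration of the failure mode (values chosen for demonstration):

#include <stdio.h>

int main(void)
{
	unsigned int grp   = 0xfffffffeu;	/* a gid near the top of the 32-bit range */
	unsigned int entry = 0;			/* gid stored in group_info */
	int cmp = grp - entry;			/* wraps; typically reinterpreted as -2 */

	/* Prints a negative cmp even though grp is clearly larger than entry. */
	printf("cmp = %d, but grp > entry is %d\n", cmp, grp > entry);
	return 0;
}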
index ce669174f355c7dd1e1893903bb4d18a25f17c34..1decafbb6b1a28197b768021cc987e230d352b5b 100644 (file)
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-       struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;
 
-       base = lock_hrtimer_base(timer, &flags);
+       lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
index d71a987fd2bf2ba5e698f9b69fccb59db4a1bb30..c7c2aed9e2dcc2e52669e7d3880be0666c496bff 100644 (file)
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
 {
-       return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+       return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
+                                               triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
index 6b5580c57644dc1804b02fd85648e7100d5d75dd..01a0700e873f53ca60084da3c0c1142bebf49b16 100644 (file)
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
        n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
        n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-       if (n)
-               sg_mark_end(sgl + n - 1);
        return n;
 }
 
index d0b5f8db11b4a4183c229e2a44f433a99f58090a..ccd641991842f4990906946f895fd5062a0389c3 100644 (file)
@@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod)
 {
        struct module *mod = _mod;
        list_del(&mod->list);
+       module_bug_cleanup(mod);
        return 0;
 }
 
@@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod,
        if (err < 0)
                goto ddebug;
 
+       module_bug_finalize(info.hdr, info.sechdrs, mod);
        list_add_rcu(&mod->list, &modules);
        mutex_unlock(&module_mutex);
 
@@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod,
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       module_bug_cleanup(mod);
+
  ddebug:
        if (!mod->taints)
                dynamic_debug_remove(info.debug);
index 4c0b7b3e6d2e9a483c6cb4cc384e979911ed03bb..200407c1502f509ee3f9d8a665bc4d3b78a27f74 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..db5b56064687e453c0df1cc118b975ea047bdcae 100644 (file)
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is returned
+        * via read() for time_enabled, time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
 
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
                perf_event_init_cpu(cpu);
                break;
 
+       case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
                break;
 
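Masking off CPU_TASKS_FROZEN lets a single case list cover both the normal hotplug notifications and their _FROZEN (suspend/resume) variants, while the added CPU_DOWN_FAILED / CPU_UP_CANCELED cases keep the per-CPU state balanced when a hotplug attempt is rolled back. A toy sketch of the flag-masked dispatch (the constants below are invented for illustration):

        #include <stdio.h>

        #define ACT_UP_PREPARE   0x01
        #define ACT_DOWN_PREPARE 0x02
        #define ACT_FROZEN       0x10   /* set on the suspend/resume variants */

        /* One switch handles both "action" and "action | ACT_FROZEN". */
        static void notify(unsigned long action, int cpu)
        {
                switch (action & ~ACT_FROZEN) {
                case ACT_UP_PREPARE:
                        printf("init cpu %d\n", cpu);
                        break;
                case ACT_DOWN_PREPARE:
                        printf("exit cpu %d\n", cpu);
                        break;
                }
        }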
index b7e4c362361bcf46fe34992e9bba1852dd478b71..645e541a45f6c9a9667c054bd7f5230ecaef67a3 100644 (file)
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
        } else if (count == 11) { /* len('0x12345678/0') */
                if (copy_from_user(ascii_value, buf, 11))
                        return -EFAULT;
+               if (strlen(ascii_value) != 10)
+                       return -EINVAL;
                x = sscanf(ascii_value, "%x", &value);
                if (x != 1)
                        return -EINVAL;
-               pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+               pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
        } else
                return -EINVAL;
 
index c77963938bca440a90423952d6d85cf4d66abd83..8dc31e02ae129e8f042804b67c38ab02f997d94c 100644 (file)
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
index 5e7edfb05e66cff0d2c99d5fc8fddfde03e372c3..d3f795f01bbce83741fb717e1ee1a05e1377c561 100644 (file)
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
        return nr_alloc;
 }
 
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+                                             unsigned long avail_normal)
 {
-       return preallocate_image_pages(nr_pages, GFP_IMAGE);
+       unsigned long alloc;
+
+       if (avail_normal <= alloc_normal)
+               return 0;
+
+       alloc = avail_normal - alloc_normal;
+       if (nr_pages < alloc)
+               alloc = nr_pages;
+
+       return preallocate_image_pages(alloc, GFP_IMAGE);
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
  */
 static void free_unnecessary_pages(void)
 {
-       unsigned long save_highmem, to_free_normal, to_free_highmem;
+       unsigned long save, to_free_normal, to_free_highmem;
 
-       to_free_normal = alloc_normal - count_data_pages();
-       save_highmem = count_highmem_pages();
-       if (alloc_highmem > save_highmem) {
-               to_free_highmem = alloc_highmem - save_highmem;
+       save = count_data_pages();
+       if (alloc_normal >= save) {
+               to_free_normal = alloc_normal - save;
+               save = 0;
+       } else {
+               to_free_normal = 0;
+               save -= alloc_normal;
+       }
+       save += count_highmem_pages();
+       if (alloc_highmem >= save) {
+               to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
-               to_free_normal -= save_highmem - alloc_highmem;
+               to_free_normal -= save - alloc_highmem;
        }
 
        memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
 {
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
-       unsigned long alloc, save_highmem, pages_highmem;
+       unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        struct timeval start, stop;
        int error;
 
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
+       avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;
 
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
-               pages += preallocate_image_memory(saveable - pages);
+               pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }
 
        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
+       /*
+        * To avoid excessive pressure on the normal zone, leave room in it to
+        * accommodate an image of the minimum size (unless it's already too
+        * small, in which case don't preallocate pages from it at all).
+        */
+       if (avail_normal > pages)
+               avail_normal -= pages;
+       else
+               avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);
 
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = (count - max_size) - pages_highmem;
-       pages = preallocate_image_memory(alloc);
-       if (pages < alloc)
-               goto err_out;
-       size = max_size - size;
-       alloc = size;
-       size = preallocate_highmem_fraction(size, highmem, count);
-       pages_highmem += size;
-       alloc -= size;
-       pages += preallocate_image_memory(alloc);
-       pages += pages_highmem;
+       pages = preallocate_image_memory(alloc, avail_normal);
+       if (pages < alloc) {
+               /* We have exhausted non-highmem pages, try highmem. */
+               alloc -= pages;
+               pages += pages_highmem;
+               pages_highmem = preallocate_image_highmem(alloc);
+               if (pages_highmem < alloc)
+                       goto err_out;
+               pages += pages_highmem;
+               /*
+                * size is the desired number of saveable pages to leave in
+                * memory, so try to preallocate (all memory - size) pages.
+                */
+               alloc = (count - pages) - size;
+               pages += preallocate_image_highmem(alloc);
+       } else {
+               /*
+                * There are approximately max_size saveable pages at this point
+                * and we want to reduce this number down to size.
+                */
+               alloc = max_size - size;
+               size = preallocate_highmem_fraction(alloc, highmem, count);
+               pages_highmem += size;
+               alloc -= size;
+               size = preallocate_image_memory(alloc, avail_normal);
+               pages_highmem += preallocate_image_highmem(alloc - size);
+               pages += pages_highmem + size;
+       }
 
        /*
         * We only need as many page frames for the image as there are saveable
index 5d0059eed3e4e3ce0bc38ad072bd7b1430d9f712..e6a5bdf61a375c309c1f9ea356e9123d79699037 100644 (file)
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
index 09b574e7f4df7c14615d104c3fe736f7c1be0847..dc85ceb908322cad7196339f4df8dd58c37b1cec 100644 (file)
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
 
                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }
+
+       sched_avg_update(this_rq);
 }
 
 static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * utime);
+               temp *= utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * cputime.utime);
+               temp *= cputime.utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
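In both hunks the bug was that `rtime * utime` was evaluated in the narrower cputime_t type and only then cast to u64, so large runtimes overflowed before the division; assigning rtime to a u64 first forces the multiplication to happen in 64 bits. A standalone sketch of the same scaling, with the overflow-prone variant noted in a comment (illustrative only):

        #include <stdint.h>
        #include <stdio.h>

        /* Scale utime so that the utime/total share of rtime is attributed
         * to user time. */
        static uint32_t scale_utime(uint32_t rtime, uint32_t utime, uint32_t total)
        {
                /* Broken form: (uint64_t)(rtime * utime) computes the product
                 * in 32 bits and may wrap before the cast takes effect. */

                /* Fixed form: widen first, then multiply and divide in 64 bits. */
                uint64_t temp = rtime;

                temp *= utime;
                temp /= total;
                return (uint32_t)temp;
        }

        int main(void)
        {
                /* 100000 * 90000 already exceeds 32 bits (9e9 > 4.29e9). */
                printf("%u\n", scale_utime(100000, 90000, 100000));  /* prints 90000 */
                return 0;
        }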
index ab661ebc4895a8471ecc808825477cf0c3558444..db3f674ca49dbe93a611716b650bb8c715464da3 100644 (file)
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int load_idx)
 {
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+       struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
-                       this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
        struct rq *rq = cpu_rq(cpu);
        u64 total, available;
 
-       sched_avg_update(rq);
-
        total = sched_avg_period() + (rq->clock - rq->age_stamp);
        available = total - rq->rt_avg;
 
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        if (time_before(now, nohz.next_balance))
                return 0;
 
-       if (!rq->nr_running)
+       if (rq->idle_at_tick)
                return 0;
 
        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
index 75c970c715d399f1385d72e92e0c47c509e568dc..ed6aacfcb7efb307fe313ea798e7074f2c8f4f92 100644 (file)
@@ -365,9 +365,10 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
 {
-       csd_lock(data);
+       unsigned int this_cpu;
+       unsigned long flags;
 
+       this_cpu = get_cpu();
        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);
 
-       generic_exec_single(cpu, data, wait);
+       if (cpu == this_cpu) {
+               local_irq_save(flags);
+               data->func(data->info);
+               local_irq_restore(flags);
+       } else {
+               csd_lock(data);
+               generic_exec_single(cpu, data, wait);
+       }
+       put_cpu();
 }
 
 /**
index e9ad4448982860af9919df53c3368156a4bf2445..7f5a0cd296a96ca44e43f0db028026094dbbb57a 100644 (file)
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
+       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        return err;
 }
 
index ca38e8e3e907557f74faaad7ddb57d330bd43d2d..f88552c6d2275be1216187f07b1e0e1b22b93af2 100644 (file)
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
        sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-       {
-               int err;
-               err = sysctl_check_table(current->nsproxy, root_table);
-       }
+       sysctl_check_table(current->nsproxy, root_table);
 #endif
        return 0;
 }
index 0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543..fa7ece649fe1bcad7b0db621fa8579e91860fb04 100644 (file)
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
 #endif
+       mutex_lock(&ftrace_profile_lock);
+
+       /* we raced with function_profile_reset() */
+       if (unlikely(rec->counter == 0)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                do_div(stddev, (rec->counter - 1) * 1000);
        }
 
-       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
-       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
+out:
+       mutex_unlock(&ftrace_profile_lock);
 
-       return 0;
+       return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
+               /* reset in case of seek/pread */
+               iter->flags &= ~FTRACE_ITER_HASH;
                return iter;
        }
 
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = no_llseek,
        .release = ftrace_filter_release,
 };
 
index 19cccc3c302871beae5fd39ad937b0791a2e785d..492197e2f86cda2792603186b59ad3fdd17c448d 100644 (file)
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
index 000e6e85b445906893d7003b2f28c615453bb726..31cc4cb0dbf2afaa49d5f7428f5cce31d535211d 100644 (file)
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
+                       if (ret)
+                               module_put(tp_event->mod);
                        break;
                }
        }
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event)
                }
        }
 out:
+       module_put(tp_event->mod);
        mutex_unlock(&event_mutex);
 }
 
index 8b27c9849b427905ea9a5ef83864a88526afe405..544301d29dee45b0dc089db788bcfe89687bfa45 100644 (file)
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);
 
-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check the name is good for event/group/fields */
+static int is_good_name(const char *name)
 {
        if (!isalpha(*name) && *name != '_')
                return 0;
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        else
                tp->rp.kp.pre_handler = kprobe_dispatcher;
 
-       if (!event || !check_event_name(event)) {
+       if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        if (!tp->call.name)
                goto error;
 
-       if (!group || !check_event_name(group)) {
+       if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
        int i, ret = 0;
        int is_return = 0, is_delete = 0;
        char *symbol = NULL, *event = NULL, *group = NULL;
-       char *arg, *tmp;
+       char *arg;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+               /* Increment count for freeing args in error case */
+               tp->nr_args++;
+
                /* Parse argument name */
                arg = strchr(argv[i], '=');
-               if (arg)
+               if (arg) {
                        *arg++ = '\0';
-               else
+                       tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+               } else {
                        arg = argv[i];
+                       /* If argument name is omitted, set "argN" */
+                       snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+                       tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+               }
 
-               tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
                if (!tp->args[i].name) {
-                       pr_info("Failed to allocate argument%d name '%s'.\n",
-                               i, argv[i]);
+                       pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }
-               tmp = strchr(tp->args[i].name, ':');
-               if (tmp)
-                       *tmp = '_';     /* convert : to _ */
+
+               if (!is_good_name(tp->args[i].name)) {
+                       pr_info("Invalid argument[%d] name: %s\n",
+                               i, tp->args[i].name);
+                       ret = -EINVAL;
+                       goto error;
+               }
 
                if (conflict_field_name(tp->args[i].name, tp->args, i)) {
-                       pr_info("Argument%d name '%s' conflicts with "
+                       pr_info("Argument[%d] name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
                /* Parse fetch argument */
                ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
                if (ret) {
-                       pr_info("Parse error at argument%d. (%d)\n", i, ret);
-                       kfree(tp->args[i].name);
+                       pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
-
-               tp->nr_args++;
        }
 
        ret = register_trace_probe(tp);
index 0d53c8e853b12450cf0c74665d13a22e91a47543..7f9c3c52ecc12ef5d0de1728839218ff87c2dc7b 100644 (file)
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-       __get_cpu_var(watchdog_touch_ts) = 0;
+       __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       __get_cpu_var(watchdog_nmi_touch) = true;
+       if (watchdog_enabled) {
+               unsigned cpu;
+
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                               per_cpu(watchdog_nmi_touch, cpu) = true;
+               }
+       }
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
+       /* if any cpu succeeds, watchdog is considered enabled for the system */
+       watchdog_enabled = 1;
+
        return 0;
 }
 
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
-
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
index 727f24e563aef326b8eba951d2a31a9aa864d32b..f77afd93922968d0c216bee148c0f10ed0ccaa72 100644 (file)
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002          Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ *   Derived from the taskqueue/keventd code by:
+ *     David Woodhouse <dwmw2@infradead.org>
+ *     Andrew Morton
+ *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *     Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
+ * Made to use alloc_percpu by Christoph Lameter.
  *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Copyright (C) 2010          SUSE Linux Products GmbH
+ * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * This is the generic async execution mechanism.  Work items are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
  */
 
 #include <linux/module.h>
index 7cdfad88128fa5d3d3076d552fbde276ef500d66..19552096d16b06bd2dac2a9b10212e482a4d0da3 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        return NULL;
 }
 
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                       struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                        struct module *mod)
 {
        char *secstrings;
        unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
         * could potentially lead to deadlock and thus be counter-productive.
         */
        list_add(&mod->bug_list, &module_bug_list);
-
-       return 0;
 }
 
 void module_bug_cleanup(struct module *mod)
index 4b5cb794c38bb270b8b72b70a47de265ec05c210..a7616fa3162e844f5b3c7090c1911543c826147e 100644 (file)
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
                 * element comparison is needed, so the client's cmp()
                 * routine can invoke cond_resched() periodically.
                 */
-               (*cmp)(priv, tail, tail);
+               (*cmp)(priv, tail->next, tail->next);
 
                tail->next->prev = tail;
                tail = tail->next;
index a5ec42868f99d8d6700f34ca81c061c1ef21d15b..4ceb05d772aed12d392d618358284ea71cb51dd2 100644 (file)
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                left -= sg_size;
 
                sg = alloc_fn(alloc_size, gfp_mask);
-               if (unlikely(!sg))
-                       return -ENOMEM;
+               if (unlikely(!sg)) {
+                       /*
+                        * Adjust entry count to reflect that the last
+                        * entry of the previous table won't be used for
+                        * linkage.  Without this, sg_kfree() may get
+                        * confused.
+                        */
+                       if (prv)
+                               table->nents = ++table->orig_nents;
+
+                       return -ENOMEM;
+               }
 
                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;
index f4e516e9c37cc4c62f93faf92131a632098d2ca5..f0fb9124e410c436c0f240d69f089f4935af2e92 100644 (file)
@@ -189,7 +189,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
index eaa4a5bbe0634390fc802ebffdbad4291fa3b991..65d420499a615bf68a3be8313c3b0d8b1b330178 100644 (file)
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void)
        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
+       err = bdi_init(&noop_backing_dev_info);
 
        return err;
 }
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr)
                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
-                       task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
-                                          dev_name(bdi->dev));
+                       task = kthread_create(bdi_writeback_thread, &bdi->wb,
+                                             "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr)
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
+                                * And as soon as the bdi thread is visible, we
+                                * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
+                               wake_up_process(task);
                        }
                        break;
 
index 13b6dad1eed272bec61a388d17f116be62cb1bb5..1481de68184bce6d8fae978d3b6be8e3223319a5 100644 (file)
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
+               flush_dcache_page(tovec->bv_page);
        }
 }
 
index 94cce51b0b3535af75c20f29ecb86a11aba32a71..4d709ee5901370842534224a9f81e7d13943e196 100644 (file)
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
-
-       unsigned long inactive, isolated;
+       unsigned long active, inactive, isolated;
 
        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
+       active = zone_page_state(zone, NR_ACTIVE_FILE) +
+                                       zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);
 
-       return isolated > inactive;
+       return isolated > (inactive + active) / 2;
 }
 
 /*
index 46f5dacf90a2cd62427fdf89b67fa01ef8af68e0..ec520c7b28dffedb5027de6b27834831938671cd 100644 (file)
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 {
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
-       unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (start + size <= start)
                return err;
 
+       /* Does pgoff wrap? */
+       if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+               return err;
+
        /* Can we represent this offset inside this architecture's pte's? */
 #if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (!(vma->vm_flags & VM_CAN_NONLINEAR))
                goto out;
 
-       if (end <= start || start < vma->vm_start || end > vma->vm_end)
+       if (start < vma->vm_start || start + size > vma->vm_end)
                goto out;
 
        /* Must set VM_NONLINEAR before any pages are populated. */
index cc5be788a39fe132c72cbc1d2fb1c03f71708575..c03273807182dde1d9dd2e905c0db11a6dfe2441 100644 (file)
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
         * and just make the page writable */
        avoidcopy = (page_mapcount(old_page) == 1);
        if (avoidcopy) {
-               if (!trylock_page(old_page)) {
-                       if (PageAnon(old_page))
-                               page_move_anon_rmap(old_page, vma, address);
-               } else
-                       unlock_page(old_page);
+               if (PageAnon(old_page))
+                       page_move_anon_rmap(old_page, vma, address);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page);
-               hugepage_add_anon_rmap(new_page, vma, address);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
                mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
-       if (!pagecache_page) {
-               page = pte_page(entry);
+       /*
+        * hugetlb_cow() requires page locks of pte_page(entry) and
+        * pagecache_page, so here we need to take the former one
+        * when page != pagecache_page or !pagecache_page.
+        * Note that locking order is always pagecache_page -> page,
+        * so no worry about deadlock.
+        */
+       page = pte_page(entry);
+       if (page != pagecache_page)
                lock_page(page);
-       }
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
        if (pagecache_page) {
                unlock_page(pagecache_page);
                put_page(pagecache_page);
-       } else {
-               unlock_page(page);
        }
+       unlock_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
index e2ae00458320786a380a1ab370efe0dc6cfd6e1a..65ab5c7067d994ad934c4f4bd5fd5809235a0756 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
        if (!ptep)
                goto out;
 
-       if (pte_write(*ptep)) {
+       if (pte_write(*ptep) || pte_dirty(*ptep)) {
                pte_t entry;
 
                swapped = PageSwapCache(page);
@@ -735,7 +735,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                        set_pte_at(mm, addr, ptep, entry);
                        goto out_unlock;
                }
-               entry = pte_wrprotect(entry);
+               if (pte_dirty(entry))
+                       set_page_dirty(page);
+               entry = pte_mkclean(pte_wrprotect(entry));
                set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
@@ -1504,8 +1506,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
        struct page *new_page;
 
-       unlock_page(page);      /* any racers will COW it, not modify it */
-
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1521,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                        add_page_to_unevictable_list(new_page);
        }
 
-       page_cache_release(page);
        return new_page;
 }
 
index 6b2ab10518512052c895dd5db7ff0f20fd1df2f3..0e18b4d649ec82abc83c208e5f9dce9cbb2cf905 100644 (file)
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       page = ksm_might_need_to_copy(page, vma, address);
-       if (!page) {
-               ret = VM_FAULT_OOM;
-               goto out;
+       /*
+        * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
+        * release the swapcache from under us.  The page pin, and pte_same
+        * test below, are not enough to exclude that.  Even if it is still
+        * swapcache, we need to check that the page's swap has not changed.
+        */
+       if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
        }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock to avoid the swap entry to be reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2783,10 @@ out_page:
        unlock_page(page);
 out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
 
index a4cfcdc00455de4be15fcec98c76e45f8de5feab..dd186c1a5d53f9ebd27de4c1bedcaac471d6c93e 100644 (file)
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
-       int pageblocks_stride;
-
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
-       /* Move forward by at least 1 * pageblock_nr_pages */
-       pageblocks_stride = 1;
-
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page))
-               pageblocks_stride += page_order(page) - pageblock_order;
+       if (pageblock_free(page)) {
+               int order;
+               /* be careful. we don't have locks, page_order can be changed.*/
+               order = page_order(page);
+               if ((order < MAX_ORDER) && (order >= pageblock_order))
+                       return page + (1 << order);
+       }
 
-       return page + (pageblocks_stride * pageblock_nr_pages);
+       return page + pageblock_nr_pages;
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
index cbae7c5b95680a1bfca1df7e11a215bfce15b57c..b70919ce4f72e6941f67b1a5462f5f270c231536 100644 (file)
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
        return (vma->vm_flags & VM_GROWSDOWN) &&
index 6128dc8e5ede709cada129438fbac101895aa09d..00161a48a45100c611ebaa053f0b1f1486d09f29 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                        removed_exe_file_vma(mm);
                fput(new->vm_file);
        }
+       unlink_anon_vmas(new);
  out_free_mpol:
        mpol_put(pol);
  out_free_vma:
index f5b7d1760213e53db3c46e84dde56daf219ea0cd..e35bfb82c8555b7377334dbea42bfcf588b0bab8 100644 (file)
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+       /*
+        * While kswapd is awake, it is considered the zone is under some
+        * memory pressure. Under pressure, there is a risk that
+        * per-cpu-counter-drift will allow the min watermark to be breached
+        * potentially causing a live-lock. While kswapd is awake and
+        * free pages are low, get a better estimate for free pages
+        */
+       if (nr_free_pages < zone->percpu_drift_mark &&
+                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+       return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
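The helper above only pays for an exact snapshot read of the free-page counter when the cheap global value has drifted below the mark and kswapd is awake, i.e. when the zone is under pressure. A hedged userspace-flavoured sketch of the same idea for a generic drift-prone counter (all names below are invented for illustration):

        #include <stddef.h>

        #define NCOUNTERS 4

        struct drifty_counter {
                long global;                 /* folded-in value, updated lazily */
                long pending[NCOUNTERS];     /* per-CPU deltas not yet folded in */
        };

        /* Cheap read: may be off by whatever still sits in the per-CPU deltas. */
        static long counter_read_fast(const struct drifty_counter *c)
        {
                return c->global;
        }

        /* Exact read: fold the pending deltas in, like the snapshot variant. */
        static long counter_read_exact(const struct drifty_counter *c)
        {
                long sum = c->global;
                for (size_t i = 0; i < NCOUNTERS; i++)
                        sum += c->pending[i];
                return sum;
        }

        /* Pay for the exact read only when the fast value is close to a limit. */
        static long counter_read_guarded(const struct drifty_counter *c, long mark)
        {
                long fast = counter_read_fast(c);
                return (fast < mark) ? counter_read_exact(c) : fast;
        }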
index fc81cb22869ef54e6871daf39f51b32e3377aa98..4029583a10241aaa84e3937ee216740e0a88a363 100644 (file)
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 }
 
 /* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
-                          const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+               const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        if (is_global_init(p))
                return true;
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
         */
        points += p->signal->oom_score_adj;
 
-       if (points < 0)
-               return 0;
+       /*
+        * Never return 0 for an eligible task that may be killed since it's
+        * possible that no single user task uses more than 0.1% of memory and
+        * no single admin task uses more than 3.0%.
+        */
+       if (points <= 0)
+               return 1;
        return (points < 1000) ? points : 1000;
 }
 
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 /**
  * dump_tasks - dump current memory state of all system tasks
  * @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks.  Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
  * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
  * value, oom_score_adj value, and name.
  *
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
  * Call with tasklist_lock read-locked.
  */
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        struct task_struct *p;
        struct task_struct *task;
 
        pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
        for_each_process(p) {
-               if (p->flags & PF_KTHREAD)
-                       continue;
-               if (mem && !task_in_mem_cgroup(p, mem))
+               if (oom_unkillable_task(p, mem, nodemask))
                        continue;
 
                task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-                                                       struct mem_cgroup *mem)
+                       struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
        mem_cgroup_print_oom_info(mem, p);
        show_mem();
        if (sysctl_oom_dump_tasks)
-               dump_tasks(mem);
+               dump_tasks(mem, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        unsigned int victim_points = 0;
 
        if (printk_ratelimit())
-               dump_header(p, gfp_mask, order, mem);
+               dump_header(p, gfp_mask, order, mem, nodemask);
 
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
 static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-                               int order)
+                               int order, const nodemask_t *nodemask)
 {
        if (likely(!sysctl_panic_on_oom))
                return;
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                        return;
        }
        read_lock(&tasklist_lock);
-       dump_header(NULL, gfp_mask, order, NULL);
+       dump_header(NULL, gfp_mask, order, NULL, nodemask);
        read_unlock(&tasklist_lock);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
        unsigned int points = 0;
        struct task_struct *p;
 
-       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+       check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
        limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
        read_lock(&tasklist_lock);
 retry:
@@ -641,6 +644,7 @@ static void clear_system_oom(void)
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask)
 {
+       const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                                &totalpages);
-       check_panic_on_oom(constraint, gfp_mask, order);
+       mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+       check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
 
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        }
 
 retry:
-       p = select_bad_process(&points, totalpages, NULL,
-                       constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
-                                                                NULL);
+       p = select_bad_process(&points, totalpages, NULL, mpol_mask);
        if (PTR_ERR(p) == -1UL)
                goto out;
 
        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
-               dump_header(NULL, gfp_mask, order, NULL);
+               dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                read_unlock(&tasklist_lock);
                panic("Out of memory and no killable processes...\n");
        }
index a9649f4b261e6b3c01632939c46a77f19f447de1..a8cfa9cc6e86e5d6912a39bc3f5c9d18fb97b7cf 100644 (file)
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages my go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
+       bool drained = false;
 
        cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (order != 0)
-               drain_all_pages();
+       if (unlikely(!(*did_some_progress)))
+               return NULL;
 
-       if (likely(*did_some_progress))
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
+
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+        * pages are pinned on the per-cpu lists. Drain them and try again
+        */
+       if (!page && !drained) {
+               drain_all_pages();
+               drained = true;
+               goto retry;
+       }
+
        return page;
 }
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
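Instead of unconditionally draining the per-cpu page lists for every high-order request, the rewritten direct-reclaim path above tries the freelists first and drains only once, on failure, before a single retry. The same try / flush / retry-once shape in a small generic sketch (the cache API here is a made-up stand-in, not the kernel interface):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdlib.h>

        /* Stand-ins for the fast-path allocator and the per-cpu cache drain. */
        static void *freelist_alloc(size_t size) { return malloc(size); }
        static void percpu_cache_drain(void) { /* stub: a real cache would release objects here */ }

        /* Try the fast path, flush cached objects once on failure, retry once. */
        static void *alloc_with_one_drain(size_t size)
        {
                bool drained = false;
                void *p;

        retry:
                p = freelist_alloc(size);
                if (!p && !drained) {
                        percpu_cache_drain();
                        drained = true;
                        goto retry;
                }
                return p;
        }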
index 58c572b18b07ffbca4e2120d4e1600705db5fc0e..c76ef3891e0da1c71ac3d1b2d4b1db0abdfe8b9f 100644 (file)
@@ -1401,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
                        if (pcpu_first_unit_cpu == NR_CPUS)
                                pcpu_first_unit_cpu = cpu;
+                       pcpu_last_unit_cpu = cpu;
                }
        }
-       pcpu_last_unit_cpu = cpu;
        pcpu_nr_units = unit;
 
        for_each_possible_cpu(cpu)
index f6f0d2dda2eae8480860cf57f5a9cfce69820716..92e6757f196ed4e3b3598c1f8b7214616a4cbe39 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -381,7 +381,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        if (PageAnon(page)) {
-               if (vma->anon_vma->root != page_anon_vma(page)->root)
+               struct anon_vma *page__anon_vma = page_anon_vma(page);
+               /*
+                * Note: swapoff's unuse_vma() is more efficient with this
+                * check, and needs it to match anon_vma when KSM is active.
+                */
+               if (!vma->anon_vma || !page__anon_vma ||
+                   vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
@@ -1564,13 +1570,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
+
        BUG_ON(!anon_vma);
-       if (!exclusive) {
-               struct anon_vma_chain *avc;
-               avc = list_entry(vma->anon_vma_chain.prev,
-                                struct anon_vma_chain, same_vma);
-               anon_vma = avc->anon_vma;
-       }
+
+       if (PageAnon(page))
+               return;
+       if (!exclusive)
+               anon_vma = anon_vma->root;
+
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        page->index = linear_page_index(vma, address);
@@ -1581,6 +1588,8 @@ void hugepage_add_anon_rmap(struct page *page,
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        int first;
+
+       BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        first = atomic_inc_and_test(&page->_mapcount);
index 1f3f9c59a73ab5be4ff4bb37f428364df7544706..7c703ff2f36f0b760b79eb36149084f07621a0a1 100644 (file)
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        return err;
                cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
-                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-                                                       BLKDEV_IFL_BARRIER))
+                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
                                break;
                }
 
@@ -320,10 +315,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && usage == SWAP_HAS_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
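get_swap_page_of_type() above replaces the special-purpose hibernation allocator that is deleted further down in this file; the "suspend routine" in its comment is the hibernation image writer in kernel/power/swap.c. A minimal sketch of how such a caller might use it (the wrapper shown is hypothetical):

        /* hypothetical caller: grab one free slot on a specific swap device */
        static unsigned long alloc_image_slot(int swap_type)
        {
                swp_entry_t entry = get_swap_page_of_type(swap_type);

                if (!entry.val)
                        return 0;               /* device full or not SWP_WRITEOK */
                return swp_offset(entry);       /* page offset within that device */
        }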
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
        if (page_swapcount(page))
                return 0;
 
+       /*
+        * Once hibernation has begun to create its image of memory,
+        * there's a danger that one of the calls to try_to_free_swap()
+        * - most probably a call from __try_to_reclaim_swap() while
+        * hibernation is allocating its own swap pages for the image,
+        * but conceivably even a call from memory reclaim - will free
+        * the swap from a page which has already been recorded in the
+        * image as a clean swapcache page, and then reuse its swap for
+        * another page of the image.  On waking from hibernation, the
+        * original page might be freed under memory pressure, then
+        * later read back in from swap, now with the wrong data.
+        *
+        * Hibernation clears bits from gfp_allowed_mask to prevent
+        * memory reclaim from writing to disk, so check that here.
+        */
+       if (!(gfp_allowed_mask & __GFP_IO))
+               return 0;
+
        delete_from_swap_cache(page);
        SetPageDirty(page);
        return 1;
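The comment block above reduces to one invariant: while hibernation is writing its image, try_to_free_swap() must not hand swap slots back, and the signal for that is hibernation having masked __GFP_IO out of gfp_allowed_mask. A minimal sketch of the guard as a standalone predicate (the wrapper is illustrative; the hunk open-codes the test):

        /* illustrative wrapper around the check added above */
        static bool may_free_swap_here(void)
        {
                /*
                 * Hibernation clears __GFP_IO (and __GFP_FS) in
                 * gfp_allowed_mask, so this is false exactly while the
                 * image is being written and true again after resume.
                 */
                return gfp_allowed_mask & __GFP_IO;
        }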
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0)
+               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
                        p->flags |= SWP_DISCARDABLE;
        }
 
index c391c320dbafcda04260923f36336891283b614f..c5dfabf25f115a34df8f9111843af28a8d58d906 100644 (file)
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                }
 
                shrink_zone(priority, zone, sc);
-               all_unreclaimable = false;
        }
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+       return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/*
+ * While hibernation is in progress, kswapd is frozen and cannot mark a zone
+ * all_unreclaimable, so OOM cannot be detected during hibernation.  Check
+ * zone reclaimability in direct reclaim as well as in kswapd.
+ */
+static bool all_unreclaimable(struct zonelist *zonelist,
+               struct scan_control *sc)
+{
+       struct zoneref *z;
+       struct zone *zone;
+       bool all_unreclaimable = true;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                       gfp_zone(sc->gfp_mask), sc->nodemask) {
+               if (!populated_zone(zone))
+                       continue;
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                       continue;
+               if (zone_reclaimable(zone)) {
+                       all_unreclaimable = false;
+                       break;
+               }
+       }
+
        return all_unreclaimable;
 }
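zone_reclaimable() above simply names the heuristic kswapd already used (see the loop_again hunk below): a zone counts as reclaimable until it has been scanned six times over without anything being freed. A worked example with invented numbers:

        /*
         * Worked example (invented numbers):
         *   zone_reclaimable_pages(zone) = 10,000 pages on the LRU lists
         *   => the zone is treated as reclaimable while
         *      zone->pages_scanned < 60,000.
         * Only once reclaim has scanned the zone six times over without
         * progress does all_unreclaimable() let do_try_to_free_pages()
         * fall through towards the OOM path.
         */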
 
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        int priority;
-       bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               all_unreclaimable = shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -1931,7 +1959,7 @@ out:
                return sc->nr_reclaimed;
 
        /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (scanning_global_lru(sc) && !all_unreclaimable)
+       if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
 
        return 0;
@@ -2197,8 +2225,7 @@ loop_again:
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
-                       if (nr_slab == 0 &&
-                           zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+                       if (nr_slab == 0 && !zone_reclaimable(zone))
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
index f389168f9a837b9c6be4e1f9bb3d0892396315de..355a9e669aaa800d62fa31d2b83110bf76cce9d7 100644 (file)
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
+               unsigned long max_drift, tolerate_drift;
+
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
+
+               /*
+                * Only set percpu_drift_mark if there is a danger that
+                * NR_FREE_PAGES reports the low watermark is ok when in fact
+                * the min watermark could be breached by an allocation
+                */
+               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+               max_drift = num_online_cpus() * threshold;
+               if (max_drift > tolerate_drift)
+                       zone->percpu_drift_mark = high_wmark_pages(zone) +
+                                       max_drift;
        }
 }
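The percpu_drift_mark logic above is about counter error, not about memory: each online CPU may hold up to threshold uncommitted updates to NR_FREE_PAGES in its per-CPU diff, so the global counter can be off by num_online_cpus() * threshold pages. A worked example with invented numbers:

        /*
         * Worked example (invented numbers):
         *   threshold                 = 32 pages per CPU
         *   num_online_cpus()         = 16   -> max_drift      = 512
         *   low_wmark - min_wmark     = 128  -> tolerate_drift = 128
         * 512 > 128, so NR_FREE_PAGES alone could report the low watermark
         * as met while the true free count is already below min.  The zone
         * therefore gets percpu_drift_mark = high_wmark + 512, letting
         * zone_nr_free_pages() (used in the hunks above) switch to a more
         * expensive but accurate summed count once free pages drop near
         * that mark.
         */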
 
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_page_state(zone, NR_FREE_PAGES),
+                  zone_nr_free_pages(zone),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
index dc6f2f26d0230b1462ea1ea583c6fda5dc5b49c5..9eb72505308fc697d2857737f8dd3f4ec04d5bd3 100644 (file)
@@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c)
                }
        }
 
-       if (c->tagpool)
+       if (c->tagpool) {
+               p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
                p9_idpool_destroy(c->tagpool);
+       }
 
        /* free requests associated with tags */
        for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
        int16_t nwqids, count;
 
        err = 0;
+       wqids = NULL;
        clnt = oldfid->clnt;
        if (clone) {
                fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
        else
                fid->qid = oldfid->qid;
 
+       kfree(wqids);
        return fid;
 
 clunk_fid:
+       kfree(wqids);
        p9_client_clunk(fid);
        fid = NULL;
 
index 0ea20c30466c7b5758e419fd51c3cba70c9797d7..17c5ba7551a55e79c2c38e22a8fe2fdaa8e00979 100644 (file)
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Allocate an fcall for the reply */
        rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
-       if (!rpl_context)
+       if (!rpl_context) {
+               err = -ENOMEM;
                goto err_close;
+       }
 
        /*
         * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        }
        rpl_context->rc = req->rc;
        if (!rpl_context->rc) {
-               kfree(rpl_context);
-               goto err_close;
+               err = -ENOMEM;
+               goto err_free2;
        }
 
        /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
         */
        if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
                err = post_recv(client, rpl_context);
-               if (err) {
-                       kfree(rpl_context->rc);
-                       kfree(rpl_context);
-                       goto err_close;
-               }
+               if (err)
+                       goto err_free1;
        } else
                atomic_dec(&rdma->rq_count);
 
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
        /* Post the request */
        c = kmalloc(sizeof *c, GFP_KERNEL);
-       if (!c)
-               goto err_close;
+       if (!c) {
+               err = -ENOMEM;
+               goto err_free1;
+       }
        c->req = req;
 
        c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
        return ib_post_send(rdma->qp, &wr, &bad_wr);
 
  error:
+       kfree(c);
+       kfree(rpl_context->rc);
+       kfree(rpl_context);
        P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
        return -EIO;
-
+ err_free1:
+       kfree(rpl_context->rc);
+ err_free2:
+       kfree(rpl_context);
  err_close:
        spin_lock_irqsave(&rdma->req_lock, flags);
        if (rdma->state < P9_RDMA_CLOSING) {
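The reworked error path above follows the usual reverse-order goto unwind: each resource gets a label, and a failure jumps to the label that releases everything acquired so far. Condensed to its skeleton (identifiers from the hunk, intermediate steps elided):

        rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
        if (!rpl_context) {
                err = -ENOMEM;
                goto err_close;         /* nothing to free yet */
        }
        rpl_context->rc = req->rc;
        if (!rpl_context->rc) {
                err = -ENOMEM;
                goto err_free2;         /* free rpl_context only */
        }
        err = post_recv(client, rpl_context);
        if (err)
                goto err_free1;         /* free rc, then rpl_context */
        /* ... */
 err_free1:
        kfree(rpl_context->rc);
 err_free2:
        kfree(rpl_context);
 err_close:
        /* mark the transport for closing and return err */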
index 36cb66022a279e96869de77c3c9cebbbe9824ff4..e9eaaf7d43c18104167692f801794bdacf89b97e 100644 (file)
@@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
 static LIST_HEAD(cred_unused);
 static unsigned long number_cred_unused;
 
-#define MAX_HASHTABLE_BITS (10) 
+#define MAX_HASHTABLE_BITS (14)
 static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
 {
        unsigned long num;
index dcfc66bab2bb16f9872aca4adc6b258b55afe05a..12c4859828146cbdff7a0e9c38a039eaa3395426 100644 (file)
@@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode)
        struct rpc_inode *rpci = RPC_I(inode);
        struct gss_upcall_msg *gss_msg;
 
+restart:
        spin_lock(&inode->i_lock);
-       while (!list_empty(&rpci->in_downcall)) {
+       list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
 
-               gss_msg = list_entry(rpci->in_downcall.next,
-                               struct gss_upcall_msg, list);
+               if (!list_empty(&gss_msg->msg.list))
+                       continue;
                gss_msg->msg.errno = -EPIPE;
                atomic_inc(&gss_msg->count);
                __gss_unhash_msg(gss_msg);
                spin_unlock(&inode->i_lock);
                gss_release_msg(gss_msg);
-               spin_lock(&inode->i_lock);
+               goto restart;
        }
        spin_unlock(&inode->i_lock);
 
index 032644610524306ea0e01383b3c4ea54888b10ab..778e5dfc5144910f83609b8bf48ca2a35011110d 100644 (file)
@@ -237,6 +237,7 @@ get_key(const void *p, const void *end,
        if (!supported_gss_krb5_enctype(alg)) {
                printk(KERN_WARNING "gss_kerberos_mech: unsupported "
                        "encryption key algorithm %d\n", alg);
+               p = ERR_PTR(-EINVAL);
                goto out_err;
        }
        p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
        ctx->enctype = ENCTYPE_DES_CBC_RAW;
 
        ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
-       if (ctx->gk5e == NULL)
+       if (ctx->gk5e == NULL) {
+               p = ERR_PTR(-EINVAL);
                goto out_err;
+       }
 
        /* The downcall format was designed before we completely understood
         * the uses of the context fields; so it includes some stuff we
         * just give some minimal sanity-checking, and some we ignore
         * completely (like the next twenty bytes): */
-       if (unlikely(p + 20 > end || p + 20 < p))
+       if (unlikely(p + 20 > end || p + 20 < p)) {
+               p = ERR_PTR(-EFAULT);
                goto out_err;
+       }
        p += 20;
        p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
        if (IS_ERR(p))
@@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
        if (ctx->seq_send64 != ctx->seq_send) {
                dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
                        (long unsigned)ctx->seq_send64, ctx->seq_send);
+               p = ERR_PTR(-EINVAL);
                goto out_err;
        }
        p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
index dc3f1f5ed8654da469bd477803eb3530ea9659b7..adade3d313f279bde98674e2e7ed21496267f9e7 100644 (file)
@@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
        if (version != 1) {
                dprintk("RPC:       unknown spkm3 token format: "
                                "obsolete nfs-utils?\n");
+               p = ERR_PTR(-EINVAL);
                goto out_err_free_ctx;
        }
 
@@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
        if (IS_ERR(p))
                goto out_err_free_intg_alg;
 
-       if (p != end)
+       if (p != end) {
+               p = ERR_PTR(-EFAULT);
                goto out_err_free_intg_key;
+       }
 
        ctx_id->internal_ctx_id = ctx;
 
index 2388d83b68ff75dc4644d1b5808ef19224cd469b..fa5549079d79ca7709d3a68bc686b77f10b08243 100644 (file)
@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
                        goto out_no_principal;
        }
 
-       kref_init(&clnt->cl_kref);
+       atomic_set(&clnt->cl_count, 1);
 
        err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
        if (err < 0)
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt)
                if (new->cl_principal == NULL)
                        goto out_no_principal;
        }
-       kref_init(&new->cl_kref);
+       atomic_set(&new->cl_count, 1);
        err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
        if (err != 0)
                goto out_no_path;
        if (new->cl_auth)
                atomic_inc(&new->cl_auth->au_count);
        xprt_get(clnt->cl_xprt);
-       kref_get(&clnt->cl_kref);
+       atomic_inc(&clnt->cl_count);
        rpc_register_client(new);
        rpciod_up();
        return new;
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
  * Free an RPC client
  */
 static void
-rpc_free_client(struct kref *kref)
+rpc_free_client(struct rpc_clnt *clnt)
 {
-       struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
        dprintk("RPC:       destroying %s client for %s\n",
                        clnt->cl_protname, clnt->cl_server);
        if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@ out_free:
  * Free an RPC client
  */
 static void
-rpc_free_auth(struct kref *kref)
+rpc_free_auth(struct rpc_clnt *clnt)
 {
-       struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
        if (clnt->cl_auth == NULL) {
-               rpc_free_client(kref);
+               rpc_free_client(clnt);
                return;
        }
 
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref)
         *       release remaining GSS contexts. This mechanism ensures
         *       that it can do so safely.
         */
-       kref_init(kref);
+       atomic_inc(&clnt->cl_count);
        rpcauth_release(clnt->cl_auth);
        clnt->cl_auth = NULL;
-       kref_put(kref, rpc_free_client);
+       if (atomic_dec_and_test(&clnt->cl_count))
+               rpc_free_client(clnt);
 }
 
 /*
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt)
 
        if (list_empty(&clnt->cl_tasks))
                wake_up(&destroy_wait);
-       kref_put(&clnt->cl_kref, rpc_free_auth);
+       if (atomic_dec_and_test(&clnt->cl_count))
+               rpc_free_auth(clnt);
 }
 
 /**
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
        if (clnt != NULL) {
                rpc_task_release_client(task);
                task->tk_client = clnt;
-               kref_get(&clnt->cl_kref);
+               atomic_inc(&clnt->cl_count);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                /* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task)
        task->tk_status = 0;
        if (status >= 0) {
                if (task->tk_rqstp) {
-                       task->tk_action = call_allocate;
+                       task->tk_action = call_refresh;
                        return;
                }
 
@@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task)
 }
 
 /*
- * 2.  Allocate the buffer. For details, see sched.c:rpc_malloc.
+ * 2.  Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+       dprint_status(task);
+
+       task->tk_action = call_refreshresult;
+       task->tk_status = 0;
+       task->tk_client->cl_stats->rpcauthrefresh++;
+       rpcauth_refreshcred(task);
+}
+
+/*
+ * 2a. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+       int status = task->tk_status;
+
+       dprint_status(task);
+
+       task->tk_status = 0;
+       task->tk_action = call_allocate;
+       if (status >= 0 && rpcauth_uptodatecred(task))
+               return;
+       switch (status) {
+       case -EACCES:
+               rpc_exit(task, -EACCES);
+               return;
+       case -ENOMEM:
+               rpc_exit(task, -ENOMEM);
+               return;
+       case -ETIMEDOUT:
+               rpc_delay(task, 3*HZ);
+       }
+       task->tk_action = call_refresh;
+}
+
+/*
+ * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
  *     (Note: buffer memory is freed in xprt_release).
  */
 static void
 call_allocate(struct rpc_task *task)
 {
-       unsigned int slack = task->tk_client->cl_auth->au_cslack;
+       unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task)
        dprint_status(task);
 
        task->tk_status = 0;
-       task->tk_action = call_refresh;
+       task->tk_action = call_bind;
 
        if (req->rq_buffer)
                return;
@@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task)
        rpc_exit(task, -ERESTARTSYS);
 }
 
-/*
- * 2a. Bind and/or refresh the credentials
- */
-static void
-call_refresh(struct rpc_task *task)
-{
-       dprint_status(task);
-
-       task->tk_action = call_refreshresult;
-       task->tk_status = 0;
-       task->tk_client->cl_stats->rpcauthrefresh++;
-       rpcauth_refreshcred(task);
-}
-
-/*
- * 2b. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
-       int status = task->tk_status;
-
-       dprint_status(task);
-
-       task->tk_status = 0;
-       task->tk_action = call_bind;
-       if (status >= 0 && rpcauth_uptodatecred(task))
-               return;
-       switch (status) {
-       case -EACCES:
-               rpc_exit(task, -EACCES);
-               return;
-       case -ENOMEM:
-               rpc_exit(task, -ENOMEM);
-               return;
-       case -ETIMEDOUT:
-               rpc_delay(task, 3*HZ);
-       }
-       task->tk_action = call_refresh;
-}
-
 static inline int
 rpc_task_need_encode(struct rpc_task *task)
 {
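Taken together, the clnt.c hunks above move credential refresh ahead of buffer allocation, so that call_allocate() can size the request buffer from the bound credential (rq_cred->cr_auth->au_cslack) rather than from the client-wide default. The resulting request setup order, sketched as a comment:

        /*
         * Request setup sequence after this change:
         *   call_reserve -> call_reserveresult
         *     -> call_refresh        (2.  bind/refresh the credential)
         *     -> call_refreshresult  (2a. retry on -ETIMEDOUT, fail on
         *                                 -EACCES / -ENOMEM)
         *     -> call_allocate       (2b. rq_cred is valid; use its au_cslack)
         *     -> call_bind -> ...
         */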
index 95ccbcf45d3eb64c6ee84c4767c85e7c5a9ee694..8c8eef2b8f26a205bef37706b900c6936a492ed8 100644 (file)
@@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
                return;
        do {
                msg = list_entry(head->next, struct rpc_pipe_msg, list);
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                msg->errno = err;
                destroy_msg(msg);
        } while (!list_empty(head));
@@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
        if (msg != NULL) {
                spin_lock(&inode->i_lock);
                msg->errno = -EAGAIN;
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
        if (res < 0 || msg->len == msg->copied) {
                filp->private_data = NULL;
                spin_lock(&inode->i_lock);
-               list_del(&msg->list);
+               list_del_init(&msg->list);
                spin_unlock(&inode->i_lock);
                rpci->ops->destroy_msg(msg);
        }
@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v)
 static int
 rpc_info_open(struct inode *inode, struct file *file)
 {
-       struct rpc_clnt *clnt;
+       struct rpc_clnt *clnt = NULL;
        int ret = single_open(file, rpc_show_info, NULL);
 
        if (!ret) {
                struct seq_file *m = file->private_data;
-               mutex_lock(&inode->i_mutex);
-               clnt = RPC_I(inode)->private;
-               if (clnt) {
-                       kref_get(&clnt->cl_kref);
+
+               spin_lock(&file->f_path.dentry->d_lock);
+               if (!d_unhashed(file->f_path.dentry))
+                       clnt = RPC_I(inode)->private;
+               if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+                       spin_unlock(&file->f_path.dentry->d_lock);
                        m->private = clnt;
                } else {
+                       spin_unlock(&file->f_path.dentry->d_lock);
                        single_release(inode, file);
                        ret = -EINVAL;
                }
-               mutex_unlock(&inode->i_mutex);
        }
        return ret;
 }
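The switch from struct kref to a plain atomic_t reference count in the clnt.c hunks above is what enables this rpc_info_open() change: the pipefs open path must take a reference only if the client has not already begun to be destroyed, which needs atomic_inc_not_zero(), an operation the kref API did not offer at the time. The pattern in isolation:

        /* take a reference only if the refcount has not already hit zero */
        if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
                /* clnt is pinned; use it, then drop the reference on release */
        } else {
                /* client is gone or being torn down; fail the open */
        }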
index ee03a4f0b64f4361af8c850b3123b8d457bec0bf..06473791c08adb7c5b0a7080ea9600d927c09d94 100644 (file)
@@ -24,6 +24,7 @@ static int __init example_init(void)
 {
        int                     i;
        unsigned int            ret;
+       unsigned int            nents;
        struct scatterlist      sg[10];
 
        printk(KERN_INFO "DMA fifo test start\n");
@@ -61,9 +62,9 @@ static int __init example_init(void)
         * byte at the beginning, after the kfifo_skip().
         */
        sg_init_table(sg, ARRAY_SIZE(sg));
-       ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* fifo is full and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
                return -EIO;
@@ -71,7 +72,7 @@ static int __init example_init(void)
 
        /* receive data */
        printk(KERN_INFO "scatterlist for receive:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
@@ -91,16 +92,16 @@ static int __init example_init(void)
        kfifo_dma_in_finish(&fifo, ret);
 
        /* Prepare to transmit data, example: 8 bytes */
-       ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
-       printk(KERN_INFO "DMA sgl entries: %d\n", ret);
-       if (!ret) {
+       nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+       printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+       if (!nents) {
                /* no data was available and no sgl was created */
                printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
                return -EIO;
        }
 
        printk(KERN_INFO "scatterlist for transmit:\n");
-       for (i = 0; i < ARRAY_SIZE(sg); i++) {
+       for (i = 0; i < nents; i++) {
                printk(KERN_INFO
                "sg[%d] -> "
                "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
index 79ab973fb43a42715d61092ce58d7408109e6eff..fc3b18d844af848a415c0e1a98024f5b097b9ead 100644 (file)
  *
  */
 
+#define _GNU_SOURCE
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <ctype.h>
 #include <unistd.h>
 #include <limits.h>
+#include <errno.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
@@ -54,6 +56,7 @@ typedef void FILEONLY(char * file);
 FILEONLY *internalfunctions;
 FILEONLY *externalfunctions;
 FILEONLY *symbolsonly;
+FILEONLY *findall;
 
 typedef void FILELINE(char * file, char * line);
 FILELINE * singlefunctions;
@@ -65,12 +68,30 @@ FILELINE * docsection;
 #define KERNELDOCPATH "scripts/"
 #define KERNELDOC     "kernel-doc"
 #define DOCBOOK       "-docbook"
+#define LIST          "-list"
 #define FUNCTION      "-function"
 #define NOFUNCTION    "-nofunction"
 #define NODOCSECTIONS "-no-doc-sections"
 
 static char *srctree, *kernsrctree;
 
+static char **all_list = NULL;
+static int all_list_len = 0;
+
+static void consume_symbol(const char *sym)
+{
+       int i;
+
+       for (i = 0; i < all_list_len; i++) {
+               if (!all_list[i])
+                       continue;
+               if (strcmp(sym, all_list[i]))
+                       continue;
+               all_list[i] = NULL;
+               break;
+       }
+}
+
 static void usage (void)
 {
        fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type)
                struct symfile * sym = &symfilelist[i];
                for (j=0; j < sym->symbolcnt; j++) {
                        vec[idx++]     = type;
+                       consume_symbol(sym->symbollist[j].name);
                        vec[idx++] = sym->symbollist[j].name;
                }
        }
@@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line)
                         vec[idx++] = &line[i];
                 }
         }
+       for (i = 0; i < idx; i++) {
+               if (strcmp(vec[i], FUNCTION))
+                       continue;
+               consume_symbol(vec[i + 1]);
+       }
        vec[idx++] = filename;
        vec[idx] = NULL;
        exec_kernel_doc(vec);
@@ -306,6 +333,10 @@ static void docsect(char *filename, char *line)
                if (*s == '\n')
                        *s = '\0';
 
+       asprintf(&s, "DOC: %s", line);
+       consume_symbol(s);
+       free(s);
+
        vec[0] = KERNELDOC;
        vec[1] = DOCBOOK;
        vec[2] = FUNCTION;
@@ -315,6 +346,84 @@ static void docsect(char *filename, char *line)
        exec_kernel_doc(vec);
 }
 
+static void find_all_symbols(char *filename)
+{
+       char *vec[4]; /* kerneldoc -list file NULL */
+       pid_t pid;
+       int ret, i, count, start;
+       char real_filename[PATH_MAX + 1];
+       int pipefd[2];
+       char *data, *str;
+       size_t data_len = 0;
+
+       vec[0] = KERNELDOC;
+       vec[1] = LIST;
+       vec[2] = filename;
+       vec[3] = NULL;
+
+       if (pipe(pipefd)) {
+               perror("pipe");
+               exit(1);
+       }
+
+       switch (pid=fork()) {
+               case -1:
+                       perror("fork");
+                       exit(1);
+               case  0:
+                       close(pipefd[0]);
+                       dup2(pipefd[1], 1);
+                       memset(real_filename, 0, sizeof(real_filename));
+                       strncat(real_filename, kernsrctree, PATH_MAX);
+                       strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
+                                       PATH_MAX - strlen(real_filename));
+                       execvp(real_filename, vec);
+                       fprintf(stderr, "exec ");
+                       perror(real_filename);
+                       exit(1);
+               default:
+                       close(pipefd[1]);
+                       data = malloc(4096);
+                       do {
+                               while ((ret = read(pipefd[0],
+                                                  data + data_len,
+                                                  4096)) > 0) {
+                                       data_len += ret;
+                                       data = realloc(data, data_len + 4096);
+                               }
+                       } while (ret < 0 && errno == EAGAIN);
+                       if (ret != 0) {
+                               perror("read");
+                               exit(1);
+                       }
+                       waitpid(pid, &ret, 0);
+       }
+       if (WIFEXITED(ret))
+               exitstatus |= WEXITSTATUS(ret);
+       else
+               exitstatus = 0xff;
+
+       count = 0;
+       /* poor man's strtok, but with counting */
+       for (i = 0; i < data_len; i++) {
+               if (data[i] == '\n') {
+                       count++;
+                       data[i] = '\0';
+               }
+       }
+       start = all_list_len;
+       all_list_len += count;
+       all_list = realloc(all_list, sizeof(char *) * all_list_len);
+       str = data;
+       for (i = 0; i < data_len && start != all_list_len; i++) {
+               if (data[i] == '\0') {
+                       all_list[start] = str;
+                       str = data + i + 1;
+                       start++;
+               }
+       }
+}
+
 /*
  * Parse file, calling action specific functions for:
  * 1) Lines containing !E
@@ -322,7 +431,8 @@ static void docsect(char *filename, char *line)
  * 3) Lines containing !D
  * 4) Lines containing !F
  * 5) Lines containing !P
- * 6) Default lines - lines not matching the above
+ * 6) Lines containing !C
+ * 7) Default lines - lines not matching the above
  */
 static void parse_file(FILE *infile)
 {
@@ -365,6 +475,12 @@ static void parse_file(FILE *infile)
                                                s++;
                                        docsection(line + 2, s);
                                        break;
+                               case 'C':
+                                       while (*s && !isspace(*s)) s++;
+                                       *s = '\0';
+                                       if (findall)
+                                               findall(line+2);
+                                       break;
                                default:
                                        defaultline(line);
                        }
@@ -380,6 +496,7 @@ static void parse_file(FILE *infile)
 int main(int argc, char *argv[])
 {
        FILE * infile;
+       int i;
 
        srctree = getenv("SRCTREE");
        if (!srctree)
@@ -415,6 +532,7 @@ int main(int argc, char *argv[])
                symbolsonly       = find_export_symbols;
                singlefunctions   = noaction2;
                docsection        = noaction2;
+               findall           = find_all_symbols;
                parse_file(infile);
 
                /* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@ int main(int argc, char *argv[])
                symbolsonly       = printline;
                singlefunctions   = singfunc;
                docsection        = docsect;
+               findall           = NULL;
 
                parse_file(infile);
+
+               for (i = 0; i < all_list_len; i++) {
+                       if (!all_list[i])
+                               continue;
+                       fprintf(stderr, "Warning: didn't use docs for %s\n",
+                               all_list[i]);
+               }
        }
        else if (strcmp("depend", argv[1]) == 0)
        {
@@ -439,6 +565,7 @@ int main(int argc, char *argv[])
                symbolsonly       = adddep;
                singlefunctions   = adddep2;
                docsection        = adddep2;
+               findall           = adddep;
                parse_file(infile);
                printf("\n");
        }
index 102e1235fd5ced3f83f18a9a5ecd2306b31e087b..cdb6dc1f6458ba4634c0f8584ae3da615ce10696 100755 (executable)
@@ -44,12 +44,13 @@ use strict;
 # Note: This only supports 'c'.
 
 # usage:
-# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ]
+# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
 #           [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile
 # or
 #           [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile
 #
 #  Set output format using one of -docbook -html -text or -man.  Default is man.
+#  The -list format is for internal use by docproc.
 #
 #  -no-doc-sections
 #      Do not output DOC: sections
@@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1",
                        $type_param, "\$1" );
 my $blankline_text = "";
 
+# list mode
+my %highlights_list = ( $type_constant, "\$1",
+                       $type_func, "\$1",
+                       $type_struct, "\$1",
+                       $type_param, "\$1" );
+my $blankline_list = "";
 
 sub usage {
-    print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n";
+    print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
+    print "         [ -no-doc-sections ]\n";
     print "         [ -function funcname [ -function funcname ...] ]\n";
     print "         [ -nofunction funcname [ -nofunction funcname ...] ]\n";
     print "         c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) {
        $output_mode = "xml";
        %highlights = %highlights_xml;
        $blankline = $blankline_xml;
+    } elsif ($cmd eq "-list") {
+       $output_mode = "list";
+       %highlights = %highlights_list;
+       $blankline = $blankline_list;
     } elsif ($cmd eq "-gnome") {
        $output_mode = "gnome";
        %highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) {
     }
 }
 
+## list mode output functions
+
+sub output_function_list(%) {
+    my %args = %{$_[0]};
+
+    print $args{'function'} . "\n";
+}
+
+# output enum in list
+sub output_enum_list(%) {
+    my %args = %{$_[0]};
+    print $args{'enum'} . "\n";
+}
+
+# output typedef in list
+sub output_typedef_list(%) {
+    my %args = %{$_[0]};
+    print $args{'typedef'} . "\n";
+}
+
+# output struct as list
+sub output_struct_list(%) {
+    my %args = %{$_[0]};
+
+    print $args{'struct'} . "\n";
+}
+
+sub output_blockhead_list(%) {
+    my %args = %{$_[0]};
+    my ($parameter, $section);
+
+    foreach $section (@{$args{'sectionlist'}}) {
+       print "DOC: $section\n";
+    }
+}
+
 ##
 # generic output function for all types (function, struct/union, typedef, enum);
 # calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) {
                foreach $px (0 .. $#prms) {
                        $prm_clean = $prms[$px];
                        $prm_clean =~ s/\[.*\]//;
-                       $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//;
+                       $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
                        # ignore array size in a parameter string;
                        # however, the original param string may contain
                        # spaces, e.g.:  addr[6 + 2]
index 3c88be94649408b412c0193f0bf4186273c6a972..02baec732bb512c77fed7d5ede247b72d788a147 100644 (file)
@@ -33,8 +33,8 @@ struct aa_rlimit {
 };
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+                     unsigned int resource, struct rlimit *new_rlim);
 
 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
 
index 6e85cdb4303f69fc1911c94aefd5f1c5b695239c..506d2baf614797624fc4b9450c0d12c9f56e8ae4 100644 (file)
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
+               *ns_name = skip_spaces(&name[1]);
                if (split) {
                        /* overwrite ':' with \0 */
                        *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
                } else
                        /* a ns name without a following profile is allowed */
                        name = NULL;
-               *ns_name = &name[1];
        }
        if (name && *name == 0)
                name = NULL;
index f73e2c2042185fff2d079dbdb3f4b8b828371e72..cf1de4462ccd3fb297f48bf351dd3494804f22c1 100644 (file)
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        int error = 0;
 
        if (!unconfined(profile))
-               error = aa_task_setrlimit(profile, resource, new_rlim);
+               error = aa_task_setrlimit(profile, task, resource, new_rlim);
 
        return error;
 }
index 19358dc14605bae1422ae00226291751695ba44c..82396050f18646ac0519352321637e930c05e367 100644 (file)
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
 {
        struct path root, tmp;
        char *res;
-       int deleted, connected;
-       int error = 0;
+       int connected, error = 0;
 
        /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
        spin_lock(&dcache_lock);
-       /* There is a race window between path lookup here and the
-        * need to strip the " (deleted) string that __d_path applies
-        * Detect the race and relookup the path
-        *
-        * The stripping of (deleted) is a hack that could be removed
-        * with an updated __d_path
-        */
-       do {
-               tmp = root;
-               deleted = d_unlinked(path->dentry);
-               res = __d_path(path, &tmp, buf, buflen);
-
-       } while (deleted != d_unlinked(path->dentry));
+       tmp = root;
+       res = __d_path(path, &tmp, buf, buflen);
        spin_unlock(&dcache_lock);
 
        *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
-       if (deleted) {
-               /* On some filesystems, newly allocated dentries appear to the
-                * security_path hooks as a deleted dentry except without an
-                * inode allocated.
-                *
-                * Remove the appended deleted text and return as string for
-                * normal mediation, or auditing.  The (deleted) string is
-                * guaranteed to be added in this case, so just strip it.
-                */
-               buf[buflen - 11] = 0;   /* - (len(" (deleted)") +\0) */
 
-               if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+       /* Handle two cases:
+        * 1. A deleted dentry && profile is not allowing mediation of deleted
+        * 2. On some filesystems, newly allocated dentries appear to the
+        *    security_path hooks as a deleted dentry except without an inode
+        *    allocated.
+        */
+       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+           !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
-               }
        }
 
        /* Determine if the path is connected to the expected root */
index 3cdc1ad0787ec9c4769455f8aeb004a417d246bf..52cc865f1464574e696fd28eca6a6e0eed326d68 100644 (file)
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                /* released below */
                ns = aa_get_namespace(root);
 
-       write_lock(&ns->lock);
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
+               write_lock(&ns->parent->lock);
                __remove_namespace(ns);
+               write_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
+               write_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
+               write_unlock(&ns->lock);
        }
-       write_unlock(&ns->lock);
 
        /* don't fail removal if audit fails */
        (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
index 4a368f1fd36ddf02af7204d30ee1b136f1d57bf7..a4136c10b1c6292edbdadae7285803583fb74241 100644 (file)
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
 /**
  * aa_task_setrlimit - test permission to set an rlimit
  * @profile - profile confining the task  (NOT NULL)
+ * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
  *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+                     unsigned int resource, struct rlimit *new_rlim)
 {
        int error = 0;
 
-       if (profile->rlimits.mask & (1 << resource) &&
-           new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
-
-               error = audit_resource(profile, resource, new_rlim->rlim_max,
-                       -EACCES);
+       /* TODO: extend resource control to handle other (non current)
+        * processes.  AppArmor rules currently have the implicit assumption
+        * that the task is setting the resource of the current process
+        */
+       if ((task != current->group_leader) ||
+           (profile->rlimits.mask & (1 << resource) &&
+            new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+               error = -EACCES;
 
-       return error;
+       return audit_resource(profile, resource, new_rlim->rlim_max, error);
 }
 
 /**
index 16d100d3fc38de931de8e1a2679bc025359f3d31..3fbcd1dda0ef6e06da4a5b4b9c23a240a6378a35 100644 (file)
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
 
 /* set during initialization */
+extern int iint_initialized;
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
index 7625b85c2274f457fc0d260a21e2d9039758d12c..afba4aef812f699134f7c7bc66c32251d2f12c69 100644 (file)
 
 RADIX_TREE(ima_iint_store, GFP_ATOMIC);
 DEFINE_SPINLOCK(ima_iint_lock);
-
 static struct kmem_cache *iint_cache __read_mostly;
 
+int iint_initialized = 0;
+
 /* ima_iint_find_get - return the iint associated with an inode
  *
  * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
        iint_cache =
            kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                              SLAB_PANIC, init_once);
+       iint_initialized = 1;
        return 0;
 }
 security_initcall(ima_iintcache_init);
index f93641382e9f9483576578a3ba5f41286f4cc3ab..e662b89d407944103dc121b9ccb37f7e68ac62e1 100644 (file)
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
        struct ima_iint_cache *iint;
        int rc;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
                return;
        mutex_lock(&iint->mutex);
+       if (!ima_initialized)
+               goto out;
        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
        if (rc < 0)
                goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
-       int rc;
+       int rc = 0;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
index b2b0998d6abda7759433d7032d255eb0a317c126..60924f6a52db2bbff40ddc953a50bb9d708febb5 100644 (file)
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void)
        keyring_r = NULL;
 
        me = current;
+       rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
        parent = me->real_parent;
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void)
                goto not_permitted;
 
        /* the keyrings must have the same UID */
-       if (pcred->tgcred->session_keyring->uid != mycred->euid ||
+       if ((pcred->tgcred->session_keyring &&
+            pcred->tgcred->session_keyring->uid != mycred->euid) ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
                goto not_permitted;
 
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void)
        set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
 
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        if (oldcred)
                put_cred(oldcred);
        return 0;
@@ -1327,6 +1330,7 @@ already_same:
        ret = 0;
 not_permitted:
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        put_cred(cred);
        return ret;
 
index ef43995119a453401dd768adfa5ae41a2602dd3a..c668b447c72594494417f6be50795f3b49f860bf 100644 (file)
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
        const pid_t gpid = task_pid_nr(current);
        static const int tomoyo_buffer_len = 4096;
        char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+       pid_t ppid;
        if (!buffer)
                return NULL;
        do_gettimeofday(&tv);
+       rcu_read_lock();
+       ppid = task_tgid_vnr(current->real_parent);
+       rcu_read_unlock();
        snprintf(buffer, tomoyo_buffer_len - 1,
                 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
                 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
                 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
                 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
-                (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+                task_tgid_vnr(current), ppid,
                 current_uid(), current_gid(), current_euid(),
                 current_egid(), current_suid(), current_sgid(),
                 current_fsuid(), current_fsgid());
index 04454cb7b24a534e84c8873a8babeb9614a8da60..7c66bd898782ce0c6fa8ea037cd19dfd78fc8bfe 100644 (file)
@@ -689,9 +689,6 @@ struct tomoyo_profile {
 
 /********** Function prototypes. **********/
 
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
 /* Check whether the given string starts with the given keyword. */
 bool tomoyo_str_starts(char **src, const char *find);
 /* Get tomoyo_realpath() of current process. */
index 070aab4901914870a0af43faef27e82e761acc32..45a818002d990f664cffadd066488a21c76fedd7 100644 (file)
@@ -31,6 +31,7 @@
 
 /* max number of user-defined controls */
 #define MAX_USER_CONTROLS      32
+#define MAX_CONTROL_COUNT      1028
 
 struct snd_kctl_ioctl {
        struct list_head list;          /* list of all ioctls */
@@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
        
        if (snd_BUG_ON(!control || !control->count))
                return NULL;
+
+       if (control->count > MAX_CONTROL_COUNT)
+               return NULL;
+
        kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
        if (kctl == NULL) {
                snd_printk(KERN_ERR "Cannot allocate control instance\n");
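The MAX_CONTROL_COUNT cap above bounds a value that arrives straight from a userspace ioctl before it is multiplied into the kzalloc() size, so an oversized count can no longer wrap the allocation. A worked example of the arithmetic being guarded (sizes invented for illustration):

        /*
         * Worked example (invented sizes, 32-bit size_t):
         *   sizeof(struct snd_kcontrol_volatile) ~ 16 bytes
         *   control->count = 0x10000001 (userspace-chosen)
         *   16 * 0x10000001 = 0x100000010 -> truncated to 0x10, so the
         *   kzalloc() above would return a tiny buffer that later
         *   per-control writes overrun.
         * Capping count at MAX_CONTROL_COUNT (1028) keeps the product far
         * below any overflow.
         */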
index 204af48c5cc17f2f59632bb2089aa03c7b593664..ac242a377aea8068f859bce49aae6825aae5d001 100644 (file)
@@ -372,14 +372,17 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
                                                  struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
                snd_iprintf(buffer, "no setup\n");
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
        snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -398,20 +401,25 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
                snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
        }
 #endif
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
                                                  struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
                snd_iprintf(buffer, "no setup\n");
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
        snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -421,24 +429,29 @@ static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
        snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
        snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
        snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
                                               struct snd_info_buffer *buffer)
 {
        struct snd_pcm_substream *substream = entry->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_runtime *runtime;
        struct snd_pcm_status status;
        int err;
+
+       mutex_lock(&substream->pcm->open_mutex);
+       runtime = substream->runtime;
        if (!runtime) {
                snd_iprintf(buffer, "closed\n");
-               return;
+               goto unlock;
        }
        memset(&status, 0, sizeof(status));
        err = snd_pcm_status(substream, &status);
        if (err < 0) {
                snd_iprintf(buffer, "error %d\n", err);
-               return;
+               goto unlock;
        }
        snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
        snd_iprintf(buffer, "owner_pid   : %d\n", pid_vnr(substream->pid));
@@ -452,6 +465,8 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
        snd_iprintf(buffer, "-----\n");
        snd_iprintf(buffer, "hw_ptr      : %ld\n", runtime->status->hw_ptr);
        snd_iprintf(buffer, "appl_ptr    : %ld\n", runtime->control->appl_ptr);
+ unlock:
+       mutex_unlock(&substream->pcm->open_mutex);
 }
 
 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
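
The three proc read handlers above now take pcm->open_mutex, re-read substream->runtime under the lock, and leave through a single unlock label instead of returning early. A minimal userspace sketch of that lock / re-check / goto-unlock shape, with a pthread mutex standing in for the kernel mutex and every name invented for the example (build with cc -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int *runtime;            /* shared state another thread may clear */

    static void proc_read(void)
    {
        pthread_mutex_lock(&open_mutex);
        int *rt = runtime;          /* read the pointer under the lock, not before */
        if (!rt) {
            printf("closed\n");
            goto unlock;            /* single exit keeps the unlock unconditional */
        }
        printf("value: %d\n", *rt);
    unlock:
        pthread_mutex_unlock(&open_mutex);
    }

    int main(void)
    {
        int value = 42;

        runtime = &value;
        proc_read();
        runtime = NULL;             /* simulate the substream being closed */
        proc_read();
        return 0;
    }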
index 134fc6c2e08dc01eeda84a730545b0532f0588fe..d4eb2ef8078416cc8d06e5d80f3ec3bb467a4f1b 100644 (file)
@@ -1992,6 +1992,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
                substream->ops->close(substream);
                substream->hw_opened = 0;
        }
+       if (pm_qos_request_active(&substream->latency_pm_qos_req))
+               pm_qos_remove_request(&substream->latency_pm_qos_req);
        if (substream->pcm_release) {
                substream->pcm_release(substream);
                substream->pcm_release = NULL;
index eb68326c37d47626b53d7970fe6e88027edc0e13..a7868ad4d530fd40b3a9bd608bd7860a7d35ce44 100644 (file)
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
                
                if (get_user(device, (int __user *)argp))
                        return -EFAULT;
+               if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+                       device = SNDRV_RAWMIDI_DEVICES - 1;
                mutex_lock(&register_mutex);
                device = device < 0 ? 0 : device + 1;
                while (device < SNDRV_RAWMIDI_DEVICES) {
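
The rawmidi hunk clamps the untrusted "current device" number before the device + 1 step, so the increment cannot overflow and the loop still reports -1 when there is no next device. A simplified standalone sketch (unlike the real loop, it assumes every index below the limit is a present device):

    #include <limits.h>
    #include <stdio.h>

    #define NUM_DEVICES 8   /* stand-in for SNDRV_RAWMIDI_DEVICES */

    /* Return the next device after 'device', or -1 if none. Clamping first
     * keeps 'device + 1' well defined even for INT_MAX or other junk input. */
    static int next_device(int device)
    {
        if (device >= NUM_DEVICES)
            device = NUM_DEVICES - 1;
        device = device < 0 ? 0 : device + 1;
        return device < NUM_DEVICES ? device : -1;
    }

    int main(void)
    {
        printf("after -1:      %d\n", next_device(-1));              /* 0  */
        printf("after 3:       %d\n", next_device(3));               /* 4  */
        printf("after last:    %d\n", next_device(NUM_DEVICES - 1)); /* -1 */
        printf("after INT_MAX: %d\n", next_device(INT_MAX));         /* -1 */
        return 0;
    }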
index 685712276ac95ab0d57985489b86f4f840bcf9b1..69cd7b3c362d19f4b0ac989b8bc107e49e59ff1b 100644 (file)
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
        return 0;
 
  _error:
-       snd_seq_oss_writeq_delete(dp->writeq);
-       snd_seq_oss_readq_delete(dp->readq);
        snd_seq_oss_synth_cleanup(dp);
        snd_seq_oss_midi_cleanup(dp);
-       delete_port(dp);
        delete_seq_queue(dp->queue);
-       kfree(dp);
+       delete_port(dp);
 
        return rc;
 }
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
 static int
 delete_port(struct seq_oss_devinfo *dp)
 {
-       if (dp->port < 0)
+       if (dp->port < 0) {
+               kfree(dp);
                return 0;
+       }
 
        debug_printk(("delete_port %i\n", dp->port));
        return snd_seq_event_port_detach(dp->cseq, dp->port);
index 1adb8a3c2b62db229f9ba0b580d930d288e74c14..42d7844ecd0bfa66ddf3db96527ccc18c79f7485 100644 (file)
@@ -900,7 +900,7 @@ static int proc_init(struct snd_akm4xxx *ak)
        return 0;
 }
 #else /* !CONFIG_PROC_FS */
-static int proc_init(struct snd_akm4xxx *ak) {}
+static int proc_init(struct snd_akm4xxx *ak) { return 0; }
 #endif
 
 int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
index 5f3e68401f905dbf2b8a03bc12fd657450b739d4..91d6023a63e57c6b14227e158c171b8edf4b8087 100644 (file)
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
 static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
+#ifndef MSND_CLASSIC
 static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
-#ifndef MSND_CLASSIC
 /* Extra Peripheral Configuration (Default: Disable) */
 static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
        struct snd_card *card;
        struct snd_msnd *chip;
 
-       if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+       if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+           || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+           ) {
                printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
                return -ENODEV;
        }
index 3827092cc1d2802e0902a2c809032cac6f6ef9da..14829210ef0bf4b1bac018d4f5e31a1d1c4728b9 100644 (file)
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
                        cfg->hp_outs--;
                        memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
                                sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
-                       memmove(sequences_hp + i - 1, sequences_hp + i,
+                       memmove(sequences_hp + i, sequences_hp + i + 1,
                                sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
                }
        }
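
The hda_codec.c fix deletes entry i of sequences_hp by copying from i + 1 down to i; the previous source/destination pair shifted the tail one slot too far to the left, corrupting the element before i. A small standalone demonstration of removing an array element with memmove():

    #include <stdio.h>
    #include <string.h>

    /* Remove element 'i' from an 'n'-element array by shifting the tail left.
     * Destination is &a[i], source &a[i + 1]; copying into &a[i - 1] instead
     * would overwrite the element that should stay put. */
    static void remove_at(int *a, size_t n, size_t i)
    {
        memmove(a + i, a + i + 1, sizeof(a[0]) * (n - 1 - i));
    }

    int main(void)
    {
        int pins[] = { 10, 11, 12, 13, 14 };
        size_t n = sizeof(pins) / sizeof(pins[0]);

        remove_at(pins, n, 2);      /* drop the 12 */
        n--;
        for (size_t j = 0; j < n; j++)
            printf("%d ", pins[j]);
        printf("\n");               /* prints: 10 11 13 14 */
        return 0;
    }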
index 1053fff4bd0a7bc5b1dbe07de0c1a7a3d0df68db..34940a07905192590a056efe734e155d66f575f7 100644 (file)
@@ -126,6 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, ICH10},"
                         "{Intel, PCH},"
                         "{Intel, CPT},"
+                        "{Intel, PBG},"
                         "{Intel, SCH},"
                         "{ATI, SB450},"
                         "{ATI, SB600},"
@@ -2749,6 +2750,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
        /* CPT */
        { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+       /* PBG */
+       { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
        /* ATI SB 450/600 */
index b697fd2a6f8b8cd19a84840fd8e05369226981db..10bbbaf6ebc3d2b0531ba2fb1ab7e3f2be1f8994 100644 (file)
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
        /* Lenovo Thinkpad T61/X61 */
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
        SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+       SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
        {}
 };
 
index 4ef5efaaaef1a81d29290d5d9618097634957c10..488fd9ade1ba2bf7b306bf6e48ae106ef823294b 100644 (file)
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 μA), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x11, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+       {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+       /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+
+       {} /* terminator */
+};
+
 /* SPDIF setup */
 static void init_digital(struct hda_codec *codec)
 {
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
 
+       /* init_verb sequence for C0/C1/C2 errata*/
+       snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
        snd_hda_sequence_write(codec, cs_coef_init_verbs);
 
        if (spec->gpio_mask) {
index 5cdb80edbd7f06d0cc00db6dd43cc325a1ef7779..972e7c453b3d6c8320d58b6aebe6ef41fc259920 100644 (file)
@@ -116,6 +116,7 @@ struct conexant_spec {
        unsigned int dell_vostro:1;
        unsigned int ideapad:1;
        unsigned int thinkpad:1;
+       unsigned int hp_laptop:1;
 
        unsigned int ext_mic_present;
        unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
        }
 }
 
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+       unsigned int present;
+
+       present = snd_hda_jack_detect(codec, 0x1b);
+       snd_printdd("CXT5066: external microphone present=%d\n", present);
+       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+                           present ? 1 : 3);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately
    order is: external mic -> dock mic -> internal mic */
 static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2407,6 +2420,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
        }
 }
 
+/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+       snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+       switch (res >> 26) {
+       case CONEXANT_HP_EVENT:
+               cxt5066_hp_automute(codec);
+               break;
+       case CONEXANT_MIC_EVENT:
+               cxt5066_hp_laptop_automic(codec);
+               break;
+       }
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
        { } /* end */
 };
 
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+       {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+       {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+       {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+       { } /* end */
+};
+
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
                        cxt5066_ideapad_automic(codec);
                else if (spec->thinkpad)
                        cxt5066_thinkpad_automic(codec);
+               else if (spec->hp_laptop)
+                       cxt5066_hp_laptop_automic(codec);
        }
        cxt5066_set_mic_boost(codec);
        return 0;
@@ -3031,6 +3068,7 @@ enum {
        CXT5066_DELL_VOSTO,     /* Dell Vostro 1015i */
        CXT5066_IDEAPAD,        /* Lenovo IdeaPad U150 */
        CXT5066_THINKPAD,       /* Lenovo ThinkPad T410s, others? */
+       CXT5066_HP_LAPTOP,      /* HP Laptop */
        CXT5066_MODELS
 };
 
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
        [CXT5066_DELL_VOSTO]    = "dell-vostro",
        [CXT5066_IDEAPAD]       = "ideapad",
        [CXT5066_THINKPAD]      = "thinkpad",
+       [CXT5066_HP_LAPTOP]     = "hp-laptop",
 };
 
 static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,11 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
+       SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3158,23 @@ static int patch_cxt5066(struct hda_codec *codec)
                spec->num_init_verbs++;
                spec->dell_automute = 1;
                break;
+       case CXT5066_HP_LAPTOP:
+               codec->patch_ops.init = cxt5066_init;
+               codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+               spec->init_verbs[spec->num_init_verbs] =
+                       cxt5066_init_verbs_hp_laptop;
+               spec->num_init_verbs++;
+               spec->hp_laptop = 1;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+               /* no S/PDIF out */
+               spec->multiout.dig_out_nid = 0;
+               /* input source automatically selected */
+               spec->input_mux = NULL;
+               spec->port_d_mode = 0;
+               spec->mic_boost = 3; /* default 30dB gain */
+               break;
+
        case CXT5066_OLPC_XO_1_5:
                codec->patch_ops.init = cxt5066_olpc_init;
                codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
index 69b950d527c31d966846a3c6e8f0ff30bdbe886c..baa108b9d6aacaf88be2999aa35515d2400bb9fd 100644 (file)
@@ -84,7 +84,7 @@ static struct hda_verb nvhdmi_basic_init_7x[] = {
 #else
 /* support all rates and formats */
 #define SUPPORTED_RATES \
-       (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+       (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
        SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
         SNDRV_PCM_RATE_192000)
 #define SUPPORTED_MAXBPS       24
index 627bf99633681483242a7559a9eaef878c3f7605..a432e6efd19bbe7bbce3ad9b6607c44661f912cf 100644 (file)
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
        }
 
        if (spec->autocfg.dig_in_pin) {
-               hda_nid_t dig_nid;
-               err = snd_hda_get_connections(codec,
-                                             spec->autocfg.dig_in_pin,
-                                             &dig_nid, 1);
-               if (err > 0)
-                       spec->dig_in_nid = dig_nid;
+               dig_nid = codec->start_nid;
+               for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+                       unsigned int wcaps = get_wcaps(codec, dig_nid);
+                       if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+                               continue;
+                       if (!(wcaps & AC_WCAP_DIGITAL))
+                               continue;
+                       if (!(wcaps & AC_WCAP_CONN_LIST))
+                               continue;
+                       err = get_connection_index(codec, dig_nid,
+                                                  spec->autocfg.dig_in_pin);
+                       if (err >= 0) {
+                               spec->dig_in_nid = dig_nid;
+                               break;
+                       }
+               }
        }
 }
 
@@ -5334,6 +5344,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 
 static struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
        {}
 };
@@ -14452,6 +14463,7 @@ static void alc269_auto_init(struct hda_codec *codec)
 
 enum {
        ALC269_FIXUP_SONY_VAIO,
+       ALC269_FIXUP_DELL_M101Z,
 };
 
 static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,11 +14475,20 @@ static const struct alc_fixup alc269_fixups[] = {
        [ALC269_FIXUP_SONY_VAIO] = {
                .verbs = alc269_sony_vaio_fixup_verbs
        },
+       [ALC269_FIXUP_DELL_M101Z] = {
+               .verbs = (const struct hda_verb[]) {
+                       /* Enables internal speaker */
+                       {0x20, AC_VERB_SET_COEF_INDEX, 13},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
+                       {}
+               }
+       },
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+       SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        {}
 };
 
index 289cb4dacfc79ec012b6efb174054efaa0afe855..6c0a11adb2a84511b83b23b69d5efce9fb2085c6 100644 (file)
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
                chip->model.suspend = claro_suspend;
                chip->model.resume = claro_resume;
                chip->model.set_adc_params = set_ak5385_params;
+               chip->model.device_config = PLAYBACK_0_TO_I2S |
+                                           PLAYBACK_1_TO_SPDIF |
+                                           CAPTURE_0_FROM_I2S_2 |
+                                           CAPTURE_1_FROM_SPDIF;
                break;
        }
        if (id->driver_data == MODEL_MERIDIAN ||
index 6147216af74412f5ff47af36b04ce9fd51013f6b..a3409edcfb5094791c33563f8ad3911826ce3e26 100644 (file)
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
 int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
 int oxygen_pci_resume(struct pci_dev *pci);
 #endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
 
 /* oxygen_mixer.c */
 
index fad03d64e3ad0c936aca79a257f6c484b067f657..7e93cf884437d0b5844c9515250c49994390e54c 100644 (file)
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
        }
 }
 
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
 {
-       struct oxygen *chip = card->private_data;
-
        spin_lock_irq(&chip->reg_lock);
        chip->interrupt_mask = 0;
        chip->pcm_running = 0;
        oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
        oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
        spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
        flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
 }
 EXPORT_SYMBOL(oxygen_pci_resume);
 #endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
+       chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
index f03a2f2cffee88911e9c11db08c5dd7dd8087b55..06c863e86e3d3d24a5dfd0f7b43f56a47b120719 100644 (file)
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
        .suspend = oxygen_pci_suspend,
        .resume = oxygen_pci_resume,
 #endif
+       .shutdown = oxygen_pci_shutdown,
 };
 
 static int __init alsa_card_xonar_init(void)
index dbc4b89d74e43ffa3311af7c9386fb5aae38d9a9..b82c1cfa96f5334554435a6eeebc6e82fa63857d 100644 (file)
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
        struct xonar_generic generic;
        u16 wm8776_regs[0x17];
        u16 wm8766_regs[0x10];
+       struct snd_kcontrol *line_adcmux_control;
+       struct snd_kcontrol *mic_adcmux_control;
        struct snd_kcontrol *lc_controls[13];
 };
 
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
        xonar_disable_output(chip);
+       wm8776_write(chip, WM8776_RESET, 0);
 }
 
 static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 {
        struct oxygen *chip = ctl->private_data;
        struct xonar_wm87x6 *data = chip->model_data;
+       struct snd_kcontrol *other_ctl;
        unsigned int mux_bit = ctl->private_value;
        u16 reg;
        int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
        mutex_lock(&chip->mutex);
        reg = data->wm8776_regs[WM8776_ADCMUX];
        if (value->value.integer.value[0]) {
-               reg &= ~0x003;
                reg |= mux_bit;
+               /* line-in and mic-in are exclusive */
+               mux_bit ^= 3;
+               if (reg & mux_bit) {
+                       reg &= ~mux_bit;
+                       if (mux_bit == 1)
+                               other_ctl = data->line_adcmux_control;
+                       else
+                               other_ctl = data->mic_adcmux_control;
+                       snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+                                      &other_ctl->id);
+               }
        } else
                reg &= ~mux_bit;
        changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
                err = snd_ctl_add(chip->card, ctl);
                if (err < 0)
                        return err;
+               if (!strcmp(ctl->id.name, "Line Capture Switch"))
+                       data->line_adcmux_control = ctl;
+               else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+                       data->mic_adcmux_control = ctl;
        }
+       if (!data->line_adcmux_control || !data->mic_adcmux_control)
+               return -ENXIO;
        BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
        for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
                ctl = snd_ctl_new1(&lc_controls[i], chip);
index b92adef8e81e7ae61eef5681925610b87f03a502..d6fa7bfd9aa123d7f8bb8142a8def39888129434 100644 (file)
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                if (err < 0)
                        return err;
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irqsave(&hdsp->lock, flags);
                info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
                info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
index 547b713d720449a7bdd746ca04fbf5a6cbe84931..0c98ef9156d8fd919f81711fe7b798be7c0ca7d8 100644 (file)
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 
        case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
+               memset(&info, 0, sizeof(info));
                spin_lock_irq(&hdspm->lock);
                info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
                info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
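
Both hwdep ioctl hunks zero the info structure before filling it, so struct padding and any field left unset do not carry stale kernel stack bytes out to userspace. A standalone sketch of the scrub-then-fill pattern; the struct here is invented for the example:

    #include <stdio.h>
    #include <string.h>

    /* There are padding bytes between 'flag' and 'value' on most ABIs; without
     * the memset they would keep whatever happened to be on the stack. */
    struct config_info {
        unsigned char flag;
        unsigned int  value;
    };

    int main(void)
    {
        struct config_info info;
        const unsigned char *p = (const unsigned char *)&info;

        memset(&info, 0, sizeof(info));   /* scrub padding and unset fields */
        info.flag = 1;
        info.value = 0x1234;

        for (size_t i = 0; i < sizeof(info); i++)
            printf("%02x ", p[i]);        /* every byte is now well defined */
        printf("\n");
        return 0;
    }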
index 2f12da4da561f6eeec98a028d7163c68e112b5dd..581a670e826192ee7267859144cc70e89f9a7a8e 100644 (file)
@@ -579,7 +579,7 @@ static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
                                  rate * delay_ms / 1000)
                * substream->runtime->channels;
 
-       pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
+       pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
                 __func__,
                 delay_ms,
                 rate,
index 1b61c23ff300be0ef2c593cf5e813e20aeb2cb5d..f1b1bc4bacfb7134d588469cbcd0b5032747f0ae 100644 (file)
@@ -94,8 +94,7 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
 
                if ((pos + len) > prtd->dma_end) {
                        len  = prtd->dma_end - pos;
-                       pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n",
-                              __func__, len);
+                       pr_debug("%s: corrected dma len %ld\n", __func__, len);
                }
 
                ret = s3c2410_dma_enqueue(prtd->params->channel,
index b823a5c9b9bc81d81b8f64f4847483eae3f253bc..87e2b7fcbf176d9f429506b285dcc2f6ac05d7bc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 
+#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
 };
 
 static struct clk siumckb_clk = {
-       .name           = "siumckb_clk",
-       .id             = -1,
        .ops            = &siumckb_clk_ops,
        .rate           = 0, /* initialised at run-time */
 };
 
+static struct clk_lookup *siumckb_lookup;
+
 static int migor_hw_params(struct snd_pcm_substream *substream,
                           struct snd_pcm_hw_params *params)
 {
@@ -180,6 +181,13 @@ static int __init migor_init(void)
        if (ret < 0)
                return ret;
 
+       siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+       if (!siumckb_lookup) {
+               ret = -ENOMEM;
+               goto eclkdevalloc;
+       }
+       clkdev_add(siumckb_lookup);
+
        /* Port number used on this machine: port B */
        migor_snd_device = platform_device_alloc("soc-audio", 1);
        if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
 epdevadd:
        platform_device_put(migor_snd_device);
 epdevalloc:
+       clkdev_drop(siumckb_lookup);
+eclkdevalloc:
        clk_unregister(&siumckb_clk);
        return ret;
 }
 
 static void __exit migor_exit(void)
 {
+       clkdev_drop(siumckb_lookup);
        clk_unregister(&siumckb_clk);
        platform_device_unregister(migor_snd_device);
 }
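
The migor hunk registers a clkdev lookup and extends the existing goto chain so a failure after clkdev_add() drops the lookup again before the clock itself is unregistered. A standalone sketch of that reverse-order unwinding, with purely illustrative resource names:

    #include <stdio.h>

    static int dummy;

    static void *acquire(const char *name) { printf("acquire %s\n", name); return &dummy; }
    static void  release(const char *name) { printf("release %s\n", name); }

    static int init(int fail_second)
    {
        void *lookup, *device;

        lookup = acquire("clk lookup");
        if (!lookup)
            return -1;

        device = fail_second ? NULL : acquire("platform device");
        if (!device)
            goto err_device;        /* undo only what was already set up */

        return 0;

    err_device:
        release("clk lookup");
        return -1;
    }

    int main(void)
    {
        printf("-> success path\n");
        init(0);
        printf("-> failure path\n");
        init(1);
        return 0;
    }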
index adbc68ce90508221cc919121fc19e062fd309f44..f6b0d2829ea96d438d1e84550272c6a7bda80b4c 100644 (file)
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
        data[1] = (value >> 8) & 0xff;
        data[2] = value & 0xff;
 
-       if (!snd_soc_codec_volatile_register(codec, reg))
-               reg_cache[reg] = value;
+       if (!snd_soc_codec_volatile_register(codec, reg)
+               && reg < codec->reg_cache_size)
+                       reg_cache[reg] = value;
 
        if (codec->cache_only) {
                codec->cache_sync = 1;
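
The soc-cache hunk only shadows a register write in reg_cache[] when the register number actually fits within reg_cache_size. A standalone sketch of the same bounds check; the size and names are placeholders:

    #include <stdio.h>

    #define CACHE_SIZE 16   /* stand-in for codec->reg_cache_size */

    static unsigned short reg_cache[CACHE_SIZE];

    /* Shadow a register write in the cache only when the index is in range;
     * out-of-range registers are written to the device but never cached. */
    static void cache_write(unsigned int reg, unsigned short value)
    {
        if (reg < CACHE_SIZE)
            reg_cache[reg] = value;
    }

    int main(void)
    {
        cache_write(0x03, 0xabcd);
        cache_write(0x40, 0x1234);   /* beyond the cache: not shadowed */
        printf("reg 0x03 cached as 0x%04x\n", reg_cache[0x03]);
        return 0;
    }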
index 9feb00c831a02b791227b75294e6dc8e15975f1f..4eabafa5b037db66b250db64cc0ce05aa783f595 100644 (file)
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
        for (idx = 0; idx < 2; idx++) {
                subs = &as->substream[idx];
                if (!subs->num_formats)
-                       return;
+                       continue;
                snd_usb_release_substream_urbs(subs, 1);
                subs->interface = -1;
        }
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        }
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+                           protocol);
+               /* fall through */
+
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
                break;
        }
-
-       default:
-               snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
-               return -EINVAL;
        }
 
        return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                        goto __error;
        }
 
-       chip->ctrl_intf = alts;
+       /*
+        * For devices with more than one control interface, we assume the
+        * first contains the audio controls. We might need a more specific
+        * check here in the future.
+        */
+       if (!chip->ctrl_intf)
+               chip->ctrl_intf = alts;
 
        if (err > 0) {
                /* create normal USB audio interfaces */
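
The card.c hunks above, and the matching ones in the following USB audio files, turn an unknown bInterfaceProtocol from a hard error into a logged fallback to the UAC v1 code path via case fall-through. A standalone sketch of that switch layout; the two protocol constants are restated here only for illustration:

    #include <stdio.h>

    enum { UAC_VERSION_1 = 0x00, UAC_VERSION_2 = 0x20 };

    /* Unknown protocol numbers are logged and then handled exactly like v1,
     * which is what the reordered cases above achieve by falling through. */
    static const char *parse_stream(int protocol)
    {
        switch (protocol) {
        default:
            printf("unknown interface protocol %#02x, assuming v1\n", protocol);
            /* fall through */
        case UAC_VERSION_1:
            return "parsed as UAC v1";
        case UAC_VERSION_2:
            return "parsed as UAC v2";
        }
    }

    int main(void)
    {
        printf("%s\n", parse_stream(UAC_VERSION_1));
        printf("%s\n", parse_stream(UAC_VERSION_2));
        printf("%s\n", parse_stream(0x99));
        return 0;
    }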
index b853f8df794f6a35ceb8e39576abf3d1cb91e36c..7754a10345451109a9ff9bd03d271148160861b9 100644 (file)
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return set_sample_rate_v1(chip, iface, alts, fmt, rate);
 
        case UAC_VERSION_2:
                return set_sample_rate_v2(chip, iface, alts, fmt, rate);
        }
-
-       return -EINVAL;
 }
 
index 1a701f1e8f501b358029f84bedcbe1268e9f543f..ef0a07e34844ae4d53ffd12e54b16a203da02a19 100644 (file)
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 
                /* get audio formats */
                switch (protocol) {
+               default:
+                       snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+                                   dev->devnum, iface_no, altno, protocol);
+                       protocol = UAC_VERSION_1;
+                       /* fall through */
+
                case UAC_VERSION_1: {
                        struct uac1_as_header_descriptor *as =
                                snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
                                   dev->devnum, iface_no, altno, as->bTerminalLink);
                        continue;
                }
-
-               default:
-                       snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
-                                  dev->devnum, iface_no, altno, protocol);
-                       continue;
                }
 
                /* get format type */
index 3a1375459c06a49d0d48a309460af30a0fcfd8c0..69148212aa70e66f9e1193040bfb41aad5548b6b 100644 (file)
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
        u64 pcm_formats;
 
        switch (protocol) {
-       case UAC_VERSION_1: {
+       case UAC_VERSION_1:
+       default: {
                struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
                sample_width = fmt->bBitResolution;
                sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
                format <<= 1;
                break;
        }
-
-       default:
-               return -EINVAL;
        }
 
        pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
         * audio class v2 uses class specific EP0 range requests for that.
         */
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1:
                fp->channels = fmt->bNrChannels;
                ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
                /* fp->channels is already set in this case */
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
        fp->channels = 1;
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1: {
                struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
                brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
        }
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        return ret;
index c166db0057d3e0e613954b21f8ca5274f0242741..3ed3901369ce1a9ca58473ecef450623cf95a7b2 100644 (file)
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
        }
 
        host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
-       mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+       switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+       case UAC_VERSION_1:
+       default:
+               mixer->protocol = UAC_VERSION_1;
+               break;
+       case UAC_VERSION_2:
+               mixer->protocol = UAC_VERSION_2;
+               break;
+       }
 
        if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
            (err = snd_usb_mixer_status_create(mixer)) < 0)
index 3634cedf93061629619125690fb19dc7d0beae64..3b5135c930628fc092cad54e67ce2a7645c59c02 100644 (file)
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return init_pitch_v1(chip, iface, alts, fmt);
 
        case UAC_VERSION_2:
                return init_pitch_v2(chip, iface, alts, fmt);
        }
-
-       return -EINVAL;
 }
 
 /*
index 4f1fa77c1feb0b7a854ab8a85bd21682cbc66377..1950e19af1cf6b23be56f0250bcf8cbb52b3d9f3 100644 (file)
@@ -1017,7 +1017,7 @@ builtin-revert.o wt-status.o: wt-status.h
 # we compile into subdirectories. If the target directory is not the source directory, they might not exist. So
 # we depend the various files onto their directories.
 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
 # In the second step, we make a rule to actually create these directories
 $(sort $(dir $(DIRECTORY_DEPS))):
        $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
index 624a96c636fdbc36a472ecaadf5fcb72c226bf38..6de4313924fb5c510f354cb3a8e2a73061e103f6 100644 (file)
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
 
+       node->children_hit = 0;
        node->parent = NULL;
        node->hit = 0;
 }
index e72f05c3bef09258311f7192afb179a10657c450..fcc16e4349df9f3353ac03f975d9c1be9938863a 100644 (file)
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                goto error;
        }
        tev->point.offset = pev->point.offset;
+       tev->point.retprobe = pev->point.retprobe;
        tev->nargs = pev->nargs;
        if (tev->nargs) {
                tev->args = zalloc(sizeof(struct probe_trace_arg)
index 525136684d4ec170201b4d07c7b080b73851edf9..32b81f707ff5eb5d950a2d233d6c00c6b90fedb2 100644 (file)
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        char buf[32], *ptr;
        int ret, nscopes;
 
+       if (!is_c_varname(pf->pvar->var)) {
+               /* Copy raw parameters */
+               pf->tvar->value = strdup(pf->pvar->var);
+               if (pf->tvar->value == NULL)
+                       return -ENOMEM;
+               if (pf->pvar->type) {
+                       pf->tvar->type = strdup(pf->pvar->type);
+                       if (pf->tvar->type == NULL)
+                               return -ENOMEM;
+               }
+               if (pf->pvar->name) {
+                       pf->tvar->name = strdup(pf->pvar->name);
+                       if (pf->tvar->name == NULL)
+                               return -ENOMEM;
+               } else
+                       pf->tvar->name = NULL;
+               return 0;
+       }
+
        if (pf->pvar->name)
                pf->tvar->name = strdup(pf->pvar->name);
        else {
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        if (pf->tvar->name == NULL)
                return -ENOMEM;
 
-       if (!is_c_varname(pf->pvar->var)) {
-               /* Copy raw parameters */
-               pf->tvar->value = strdup(pf->pvar->var);
-               if (pf->tvar->value == NULL)
-                       return -ENOMEM;
-               if (pf->pvar->type) {
-                       pf->tvar->type = strdup(pf->pvar->type);
-                       if (pf->tvar->type == NULL)
-                               return -ENOMEM;
-               }
-               return 0;
-       }
-
        pr_debug("Searching '%s' variable in context.\n",
                 pf->pvar->var);
        /* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
                /* This function has no name. */
                tev->point.offset = (unsigned long)pf->addr;
 
+       /* Return probe must be on the head of a subprogram */
+       if (pf->pev->point.retprobe) {
+               if (tev->point.offset != 0) {
+                       pr_warning("Return probe must be on the head of"
+                                  " a real function\n");
+                       return -EINVAL;
+               }
+               tev->point.retprobe = true;
+       }
+
        pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
                 tev->point.offset);
 
index 1a367734e01693c8a93f79bb3846af4a89f9cffe..b2f5ae97f33dded1cdaba3e843ab2df888515777 100644 (file)
@@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str,
 
 int symbol__init(void)
 {
+       if (symbol_conf.initialized)
+               return 0;
+
        elf_version(EV_CURRENT);
        if (symbol_conf.sort_by_name)
                symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2296,7 @@ int symbol__init(void)
                       symbol_conf.sym_list_str, "symbol") < 0)
                goto out_free_comm_list;
 
+       symbol_conf.initialized = true;
        return 0;
 
 out_free_dso_list:
@@ -2304,11 +2308,14 @@ out_free_comm_list:
 
 void symbol__exit(void)
 {
+       if (!symbol_conf.initialized)
+               return;
        strlist__delete(symbol_conf.sym_list);
        strlist__delete(symbol_conf.dso_list);
        strlist__delete(symbol_conf.comm_list);
        vmlinux_path__exit();
        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+       symbol_conf.initialized = false;
 }
 
 int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
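
The symbol.c hunks make symbol__init() and symbol__exit() idempotent by tracking an initialized flag, so repeated or unbalanced calls become harmless no-ops. A standalone sketch of the same guard:

    #include <stdbool.h>
    #include <stdio.h>

    static bool initialized;

    static int subsystem_init(void)
    {
        if (initialized)
            return 0;              /* a second call is a no-op */
        printf("initializing\n");
        initialized = true;
        return 0;
    }

    static void subsystem_exit(void)
    {
        if (!initialized)
            return;                /* never tear down what was not set up */
        printf("tearing down\n");
        initialized = false;
    }

    int main(void)
    {
        subsystem_init();
        subsystem_init();          /* no-op */
        subsystem_exit();
        subsystem_exit();          /* no-op */
        return 0;
    }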
index b7a8da4af5a0a98e84e1616e55cdaceec18fcdbf..ea95c2756f05e73d09f6868f24b174ed1221f59a 100644 (file)
@@ -69,7 +69,8 @@ struct symbol_conf {
                        show_nr_samples,
                        use_callchain,
                        exclude_other,
-                       show_cpu_utilization;
+                       show_cpu_utilization,
+                       initialized;
        const char      *vmlinux_name,
                        *source_prefix,
                        *field_sep;
index 7ea983acfaea521a425f7eb33250e319b0a3a960..f7af2fca965d5c206973f73ccd25b8b3607e02e5 100644 (file)
@@ -97,7 +97,7 @@ void setup_python_scripting(void)
        register_python_scripting(&python_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops python_scripting_ops;
+extern struct scripting_ops python_scripting_ops;
 
 void setup_python_scripting(void)
 {
@@ -158,7 +158,7 @@ void setup_perl_scripting(void)
        register_perl_scripting(&perl_scripting_unsupported_ops);
 }
 #else
-struct scripting_ops perl_scripting_ops;
+extern struct scripting_ops perl_scripting_ops;
 
 void setup_perl_scripting(void)
 {
index dafdf6775d77f44d69abf1980b1a9cfe4ab053dc..6866aa4c41e09cfd0239559cc903506a351a549c 100644 (file)
@@ -773,7 +773,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
 
                        switch (key) {
                        case 'a':
-                               if (browser->selection->map == NULL &&
+                               if (browser->selection->map == NULL ||
                                    browser->selection->map->dso->annotate_warned)
                                        continue;
                                goto do_annotate;
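
The newt.c change swaps && for ||: a NULL map must be skipped before anything reaches the annotate path and dereferences it, whereas the old condition only skipped non-NULL maps that had already warned. A standalone sketch of why the operator matters:

    #include <stddef.h>
    #include <stdio.h>

    struct dso { int annotate_warned; };
    struct map { struct dso *dso; };

    /* '||' short-circuits on a NULL map before ->dso is touched; with '&&'
     * a NULL map evaluates to "do not skip" and is dereferenced later on. */
    static int should_skip(const struct map *map)
    {
        return map == NULL || map->dso->annotate_warned;
    }

    int main(void)
    {
        struct dso warned = { 1 };
        struct dso clean  = { 0 };
        struct map m_warned = { &warned };
        struct map m_clean  = { &clean };

        printf("NULL map:   %s\n", should_skip(NULL)      ? "skip" : "annotate");
        printf("warned dso: %s\n", should_skip(&m_warned) ? "skip" : "annotate");
        printf("clean dso:  %s\n", should_skip(&m_clean)  ? "skip" : "annotate");
        return 0;
    }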
index 66cf65b510b11c1c245db3b6a435e040ac3606fc..c1f1e3c6298462f8ed4d672257dac3b4da081b33 100644 (file)
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        events = file->f_op->poll(file, &irqfd->pt);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
-       spin_unlock_irq(&kvm->irqfds.lock);
 
        /*
         * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        if (events & POLLIN)
                schedule_work(&irqfd->inject);
 
+       spin_unlock_irq(&kvm->irqfds.lock);
+
        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
         * we might race against the POLLHUP
index b78b794c1039df394b6ccfc43ae34fb2ff6fc47a..5186e728c53ed7e27ba6d32df31d37f7aa140ebc 100644 (file)
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                       cpu);
                hardware_disable(NULL);
                break;
-       case CPU_ONLINE:
+       case CPU_STARTING:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_enable, NULL, 1);
+               hardware_enable(NULL);
                break;
        }
        return NOTIFY_OK;
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 
 asmlinkage void kvm_handle_fault_on_reboot(void)
 {
-       if (kvm_rebooting)
+       if (kvm_rebooting) {
                /* spin while reset goes on */
+               local_irq_enable();
                while (true)
                        ;
+       }
        /* Fault while not rebooting.  We want the trace. */
        BUG();
 }
@@ -2096,7 +2098,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
-       .priority = 20, /* must be > scheduler priority */
 };
 
 static int vm_stat_get(void *_offset, u64 *val)