Merge branches 'perf-urgent-for-linus', 'sched-urgent-for-linus', 'timers-urgent-for-linus' and 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author     Linus Torvalds <torvalds@linux-foundation.org>  Sun, 19 Jun 2011 16:00:18 +0000 (09:00 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>  Sun, 19 Jun 2011 16:00:18 +0000 (09:00 -0700)
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tools/perf: Fix static build of perf tool
  tracing: Fix regression in printk_formats file

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  generic-ipi: Fix kexec boot crash by initializing call_single_queue before enabling interrupts

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  clocksource: Make watchdog robust vs. interruption
  timerfd: Fix wakeup of processes when timer is cancelled on clock change

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, MAINTAINERS: Add x86 MCE people
  x86, efi: Do not reserve boot services regions within reserved areas

808 files changed:
CREDITS
Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 [new file with mode: 0644]
Documentation/accounting/cgroupstats.txt
Documentation/cgroups/blkio-controller.txt
Documentation/cgroups/cgroups.txt
Documentation/cgroups/cpuacct.txt
Documentation/cgroups/cpusets.txt
Documentation/cgroups/devices.txt
Documentation/cgroups/freezer-subsystem.txt
Documentation/cgroups/memory.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/proc.txt
Documentation/kernel-parameters.txt
Documentation/kmemleak.txt
Documentation/md.txt
Documentation/printk-formats.txt
Documentation/scheduler/sched-design-CFS.txt
Documentation/scheduler/sched-rt-group.txt
Documentation/vm/hwpoison.txt
MAINTAINERS
Makefile
arch/alpha/kernel/osf_sys.c
arch/arm/boot/compressed/head.S
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/netx_defconfig
arch/arm/configs/viper_defconfig
arch/arm/configs/xcep_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/kernel/devtree.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/traps.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/devices.c
arch/arm/mach-davinci/gpio.c
arch/arm/mach-ep93xx/core.c
arch/arm/mach-exynos4/Kconfig
arch/arm/mach-exynos4/Makefile
arch/arm/mach-exynos4/cpu.c
arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
arch/arm/mach-exynos4/setup-usb-phy.c [moved from arch/arm/mach-exynos4/usb-phy.c with 100% similarity]
arch/arm/mach-exynos4/time.c
arch/arm/mach-footbridge/dc21285-timer.c
arch/arm/mach-footbridge/include/mach/debug-macro.S
arch/arm/mach-mxs/ocotp.c
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap1/pm_bus.c
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-apollon.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-cm-t3517.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-omap3touchbook.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-zoom-display.c
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/common-board-devices.h
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/hsmmc.h
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/mux44xx.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_phy_internal.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-pxa/spitz_pm.c
arch/arm/mach-s3c2410/Makefile
arch/arm/mach-s3c2410/irq.c [deleted file]
arch/arm/mach-s5pv210/cpufreq.c
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/intc-sh73a0.c
arch/arm/mach-shmobile/setup-sh7367.c
arch/arm/mach-tegra/board-harmony-power.c
arch/arm/mach-tegra/board-harmony.h
arch/arm/mach-u300/clock.h
arch/arm/mach-u300/include/mach/u300-regs.h
arch/arm/mach-u300/timer.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-vexpress/v2m.c
arch/arm/mm/context.c
arch/arm/mm/init.c
arch/arm/mm/proc-arm7tdmi.S
arch/arm/mm/proc-arm9tdmi.S
arch/arm/mm/proc-v7.S
arch/arm/plat-mxc/devices/platform-imx-dma.c
arch/arm/plat-nomadik/include/plat/gpio.h
arch/arm/plat-omap/include/plat/flash.h
arch/arm/plat-omap/include/plat/iovmm.h
arch/arm/plat-omap/include/plat/mmc.h
arch/arm/plat-omap/iovmm.c
arch/arm/plat-omap/omap_device.c
arch/arm/plat-omap/sram.c
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-s3c24xx/irq.c
arch/arm/plat-s5p/dev-onenand.c
arch/arm/plat-s5p/include/plat/map-s5p.h
arch/arm/plat-samsung/dev-onenand.c
arch/arm/plat-samsung/include/plat/devs.h
arch/avr32/configs/atngw100_defconfig
arch/avr32/configs/atngw100_evklcd100_defconfig
arch/avr32/configs/atngw100_evklcd101_defconfig
arch/avr32/configs/atngw100_mrmt_defconfig
arch/avr32/configs/atngw100mkii_defconfig
arch/avr32/configs/atngw100mkii_evklcd100_defconfig
arch/avr32/configs/atngw100mkii_evklcd101_defconfig
arch/avr32/configs/atstk1002_defconfig
arch/avr32/configs/atstk1003_defconfig
arch/avr32/configs/atstk1004_defconfig
arch/avr32/configs/atstk1006_defconfig
arch/avr32/configs/favr-32_defconfig
arch/avr32/configs/hammerhead_defconfig
arch/avr32/configs/merisc_defconfig
arch/avr32/configs/mimc200_defconfig
arch/avr32/include/asm/processor.h
arch/avr32/mach-at32ap/at32ap700x.c
arch/avr32/mach-at32ap/include/mach/cpu.h
arch/avr32/mach-at32ap/intc.c
arch/blackfin/configs/CM-BF548_defconfig
arch/blackfin/lib/strncpy.S
arch/m68k/Kconfig.nommu
arch/m68k/kernel/m68k_ksyms.c
arch/m68k/kernel/vmlinux.lds_no.S
arch/m68k/lib/memcpy.c
arch/m68k/lib/memset.c
arch/m68k/lib/muldi3.c
arch/mips/configs/mtx1_defconfig
arch/mn10300/kernel/traps.c
arch/mn10300/kernel/vmlinux.lds.S
arch/mn10300/mm/cache-dbg-flush-by-reg.S
arch/powerpc/boot/.gitignore
arch/powerpc/boot/dtc-src/.gitignore [deleted file]
arch/powerpc/configs/52xx/pcm030_defconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/include/asm/rio.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/prom.c
arch/powerpc/mm/init_32.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/sysdev/fsl_lbc.c
arch/s390/Kconfig
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/tlb.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/sie64a.S
arch/s390/mm/pgtable.c
arch/sh/Kconfig
arch/sh/boards/mach-ap325rxa/setup.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/boot/compressed/Makefile
arch/sh/configs/titan_defconfig
arch/sh/include/asm/cmpxchg-grb.h
arch/sh/include/asm/pgtable.h
arch/sh/include/asm/processor_64.h
arch/sh/include/asm/ptrace.h
arch/sh/include/asm/tlb.h
arch/sh/include/cpu-sh4/cpu/sh7722.h
arch/sh/include/cpu-sh4/cpu/sh7724.h
arch/sh/include/cpu-sh4/cpu/sh7757.h
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
arch/sh/kernel/process_32.c
arch/sh/mm/cache-debugfs.c
arch/sh/mm/consistent.c
arch/sparc/Kconfig
arch/sparc/include/asm/floppy_32.h
arch/sparc/include/asm/floppy_64.h
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_pci.h [new file with mode: 0644]
arch/sparc/include/asm/pci_32.h
arch/sparc/include/asm/pcic.h
arch/sparc/include/asm/system_32.h
arch/sparc/include/asm/system_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/apc.c
arch/sparc/kernel/auxio_32.c
arch/sparc/kernel/chmc.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pci.c [new file with mode: 0644]
arch/sparc/kernel/leon_pci_grpci2.c [new file with mode: 0644]
arch/sparc/kernel/module.c
arch/sparc/kernel/pci_common.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/prom_irqtrans.c
arch/sparc/kernel/psycho_common.c
arch/sparc/kernel/sbus.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/sys_sparc32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/time_64.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/us2e_cpufreq.c
arch/sparc/kernel/us3_cpufreq.c
arch/sparc/kernel/viohs.c
arch/sparc/kernel/visemul.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/init_32.c
arch/sparc/mm/init_64.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/sun4c.c
arch/sparc/mm/tsb.c
arch/sparc/prom/console_32.c
arch/sparc/prom/init_32.c
arch/sparc/prom/mp.c
arch/unicore32/Kconfig
arch/unicore32/Makefile
arch/unicore32/boot/compressed/Makefile
arch/unicore32/configs/unicore32_defconfig [moved from arch/unicore32/configs/debug_defconfig with 97% similarity]
arch/unicore32/include/asm/Kbuild
arch/unicore32/kernel/Makefile
arch/unicore32/kernel/vmlinux.lds.S
arch/x86/include/asm/memblock.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/devicetree.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/emulate.c
arch/x86/mm/memblock.c
arch/x86/platform/efi/efi.c
arch/x86/xen/multicalls.c
block/blk-ioc.c
block/cfq-iosched.c
drivers/ata/libata-eh.c
drivers/base/power/clock_ops.c
drivers/block/nbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/xenbus.c
drivers/bluetooth/hci_ldisc.c
drivers/char/hpet.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_tmu.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/powernow-k8.c
drivers/dma/shdma.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpio/Kconfig
drivers/gpio/gpio-exynos4.c
drivers/gpio/gpio-nomadik.c
drivers/gpio/gpio-omap.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/nouveau/nouveau_hw.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nouveau_vm.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nvreg.h
drivers/gpu/drm/radeon/Kconfig
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/cayman_blit_shaders.c
drivers/gpu/drm/radeon/cayman_blit_shaders.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100_track.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/reg_srcs/r600
drivers/gpu/drm/savage/savage_bci.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/coretemp.c
drivers/hwmon/ibmaem.c
drivers/hwmon/ibmpex.c
drivers/hwmon/max6642.c
drivers/hwmon/s3c-hwmon.c
drivers/ide/ide-cd.c
drivers/input/serio/serport.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/leds/Kconfig
drivers/md/bitmap.c
drivers/md/bitmap.h
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid1.h
drivers/md/raid5.c
drivers/media/dvb/dvb-usb/anysee.c
drivers/media/media-devnode.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/gspca/coarse_expo_autogain.h [deleted file]
drivers/media/video/gspca/ov519.c
drivers/media/video/gspca/sonixj.c
drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-firmware.c
drivers/media/video/ivtv/ivtv-ioctl.c
drivers/media/video/ivtv/ivtv-ioctl.h
drivers/media/video/ivtv/ivtv-streams.c
drivers/media/video/ivtv/ivtv-vbi.c
drivers/media/video/ivtv/ivtvfb.c
drivers/media/video/omap3isp/isp.c
drivers/media/video/soc_camera.c
drivers/media/video/uvc/uvc_entity.c
drivers/misc/apds990x.c
drivers/misc/cs5535-mfgpt.c
drivers/misc/spear13xx_pcie_gadget.c
drivers/misc/ti-st/st_core.c
drivers/mmc/host/mmci.c
drivers/mmc/host/omap_hsmmc.c
drivers/net/3c509.c
drivers/net/3c59x.c
drivers/net/arm/am79c961a.c
drivers/net/arm/ep93xx_eth.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_serial.c
drivers/net/can/flexcan.c
drivers/net/can/slcan.c
drivers/net/davinci_emac.c
drivers/net/depca.c
drivers/net/dl2k.c
drivers/net/dm9000.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/gianfar_ethtool.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/hp100.c
drivers/net/ibmlana.c
drivers/net/igb/igb_main.c
drivers/net/irda/irtty-sir.c
drivers/net/irda/smsc-ircc2.c
drivers/net/ks8842.c
drivers/net/ne3210.c
drivers/net/ppp_async.c
drivers/net/ppp_synctty.c
drivers/net/qlcnic/qlcnic_hw.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/slip.c
drivers/net/smc-mca.c
drivers/net/smc91x.c
drivers/net/tg3.c
drivers/net/tokenring/madgemc.c
drivers/net/tulip/de4x5.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_ncm.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/iwlegacy/iwl-4965-lib.c
drivers/net/wireless/iwlegacy/iwl-4965.c
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-core.h
drivers/net/wireless/iwlegacy/iwl-dev.h
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2x00config.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/wl12xx/conf.h
drivers/net/wireless/wl12xx/main.c
drivers/net/wireless/wl12xx/scan.c
drivers/net/wireless/wl12xx/scan.h
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/pci/Makefile
drivers/pci/intel-iommu.c
drivers/pci/pci.c
drivers/pcmcia/pxa2xx_vpac270.c
drivers/ptp/ptp_chardev.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/interface.c
drivers/rtc/rtc-dev.c
drivers/rtc/rtc-m41t93.c
drivers/rtc/rtc-puv3.c [moved from arch/unicore32/kernel/rtc.c with 98% similarity]
drivers/s390/cio/qdio_main.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_qdio.c
drivers/s390/scsi/zfcp_qdio.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/sh/clk/core.c
drivers/spi/amba-pl022.c
drivers/spi/omap2_mcspi.c
drivers/spi/spi_bfin5xx.c
drivers/ssb/driver_pcicore.c
drivers/staging/Kconfig
drivers/staging/altera-stapl/altera-jtag.c
drivers/staging/altera-stapl/altera.c
drivers/staging/altera-stapl/altera.h [moved from include/staging/altera.h with 100% similarity]
drivers/staging/ath6kl/Kconfig
drivers/staging/ath6kl/os/linux/cfg80211.c
drivers/staging/brcm80211/brcmfmac/wl_iw.c
drivers/staging/gma500/psb_drv.c
drivers/staging/gma500/psb_fb.c
drivers/staging/gma500/psb_intel_bios.c
drivers/staging/iio/accel/adis16201.h
drivers/staging/iio/accel/adis16203.h
drivers/staging/iio/dac/max517.c
drivers/staging/iio/imu/adis16400_ring.c
drivers/staging/iio/industrialio-trigger.c
drivers/staging/mei/init.c
drivers/staging/olpc_dcon/Kconfig
drivers/staging/rts_pstor/sd.c
drivers/staging/usbip/stub_dev.c
drivers/staging/usbip/stub_rx.c
drivers/tty/n_gsm.c
drivers/tty/n_hdlc.c
drivers/tty/n_r3964.c
drivers/tty/n_tty.c
drivers/tty/serial/pch_uart.c
drivers/tty/tty_buffer.c
drivers/tty/vt/selection.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/inode.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/mv_udc_core.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/xhci-dbg.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_core.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/transport.c
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/usb.c
drivers/usb/storage/usb.h
drivers/video/arcfb.c
drivers/video/aty/atyfb_base.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/adp8870_bl.c [new file with mode: 0644]
drivers/video/bf537-lq035.c
drivers/video/broadsheetfb.c
drivers/video/efifb.c
drivers/video/hecubafb.c
drivers/video/imxfb.c
drivers/video/metronomefb.c
drivers/video/modedb.c
drivers/video/pxa168fb.c
drivers/video/s3c-fb.c
drivers/video/savage/savagefb_driver.c
drivers/video/sh_mobile_hdmi.c
drivers/video/sh_mobile_lcdcfb.c
drivers/video/vga16fb.c
drivers/video/xen-fbfront.c
drivers/w1/masters/Kconfig
drivers/xen/events.c
drivers/xen/swiotlb-xen.c
fs/afs/dir.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/super.c
fs/afs/write.c
fs/block_dev.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/buffer.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/snap.c
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/cache.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/fscache.c
fs/exec.c
fs/fat/file.c
fs/fuse/inode.c
fs/gfs2/glock.c
fs/isofs/inode.c
fs/jfs/jfs_logmgr.c
fs/namei.c
fs/nilfs2/btree.c
fs/nilfs2/segment.c
fs/ocfs2/super.c
fs/partitions/check.c
fs/proc/namespaces.c
fs/proc/root.c
fs/super.c
fs/sysfs/mount.c
fs/sysfs/sysfs.h
fs/timerfd.c
fs/ubifs/io.c
fs/ubifs/journal.c
fs/ubifs/orphan.c
fs/ubifs/recovery.c
fs/ubifs/replay.c
fs/ubifs/shrinker.c
fs/ubifs/super.c
fs/ubifs/tnc.c
fs/ubifs/ubifs.h
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_log.c
include/asm-generic/gpio.h
include/asm-generic/pgtable.h
include/asm-generic/unistd.h
include/drm/drm_crtc.h
include/drm/drm_pciids.h
include/linux/basic_mmio_gpio.h
include/linux/blkdev.h
include/linux/clocksource.h
include/linux/ethtool.h
include/linux/fs.h
include/linux/genhd.h
include/linux/gpio.h
include/linux/i2c/adp8870.h [new file with mode: 0644]
include/linux/ieee80211.h
include/linux/if_packet.h
include/linux/if_vlan.h
include/linux/interrupt.h
include/linux/irqreturn.h
include/linux/kernel.h
include/linux/kmod.h
include/linux/kmsg_dump.h
include/linux/kobject_ns.h
include/linux/memcontrol.h
include/linux/netdevice.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/percpu.h
include/linux/sched.h
include/linux/seqlock.h
include/linux/skbuff.h
include/linux/smp.h
include/linux/swap.h
include/linux/swiotlb.h
include/linux/sysfs.h
include/linux/topology.h
include/linux/tty_ldisc.h
include/linux/usb_usual.h
include/linux/uts.h
include/media/v4l2-dev.h
include/net/net_namespace.h
include/net/sctp/command.h
include/net/sctp/structs.h
include/trace/events/irq.h
include/trace/events/net.h
include/trace/events/vmscan.h
init/Kconfig
init/calibrate.c
init/main.c
kernel/exit.c
kernel/gcov/Kconfig
kernel/irq/handle.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/kmod.c
kernel/lockdep.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/sched.c
kernel/sched_rt.c
kernel/signal.c
kernel/smp.c
kernel/softirq.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/timer.c
lib/Kconfig.debug
lib/bitmap.c
lib/kobject.c
lib/swiotlb.c
lib/vsprintf.c
mm/compaction.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mmap.c
mm/page_cgroup.c
mm/rmap.c
mm/slab.c
mm/slub.c
mm/thrash.c
mm/vmscan.c
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/bluetooth/l2cap_core.c
net/bridge/br_netfilter.c
net/caif/chnl_net.c
net/ceph/osd_client.c
net/core/dev.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/netpoll.c
net/ipv4/af_inet.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_helper.c
net/ipv4/netfilter/nf_nat_rule.c
net/ipv4/netfilter/nf_nat_standalone.c
net/ipv4/route.c
net/ipv6/af_inet6.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/irda/iriap.c
net/l2tp/l2tp_debugfs.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/mlme.c
net/mac80211/scan.c
net/mac80211/tx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/xt_socket.c
net/packet/af_packet.c
net/sched/sch_generic.c
net/sctp/associola.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_replay.c
scripts/Makefile.asm-generic
scripts/checkpatch.pl
scripts/depmod.sh [new file with mode: 0755]
security/apparmor/lsm.c
security/keys/request_key.c
security/selinux/hooks.c
security/selinux/selinuxfs.c
security/selinux/ss/policydb.c
security/tomoyo/mount.c
sound/core/misc.c
sound/firewire/isight.c
sound/pci/asihpi/hpidspcd.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/fm801.c
sound/pci/hda/hda_beep.h
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/lola/lola.c
sound/pci/rme9652/hdspm.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/blackfin/bf5xx-ad1836.c
sound/soc/codecs/ad1836.c
sound/soc/codecs/ad1836.h
sound/soc/codecs/cx20442.c
sound/soc/codecs/wm8804.c
sound/soc/codecs/wm8915.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm_hubs.c
sound/soc/fsl/fsl_dma.c
sound/soc/samsung/i2s.c
sound/soc/soc-cache.c
sound/soc/soc-dapm.c
sound/usb/6fire/firmware.c
sound/usb/6fire/pcm.c
sound/usb/quirks.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/trace-event-parse.c
tools/testing/ktest/ktest.pl
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index a7ea8e343836fb47fd82ef624d77c630ba199df2..d78359f5f64d7e581110166f9a2a7ecd410abc46 100644
--- a/CREDITS
+++ b/CREDITS
@@ -518,6 +518,14 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
+M: David Brownell
+D: Kernel engineer, mentor, and friend.  Maintained USB EHCI and
+D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
+D: device drivers.  His encouragement also helped many engineers get
+D: started working on the Linux kernel.  David passed away in early
+D: 2011, and will be greatly missed.
+W: https://lkml.org/lkml/2011/4/5/36
+
 N: Gary Brubaker
 E: xavyer@ix.netcom.com
 D: USB Serial Empeg Empeg-car Mark I/II Driver
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
new file mode 100644
index 0000000..aa11dbd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -0,0 +1,56 @@
+What:          /sys/class/backlight/<backlight>/<ambient light zone>_max
+What:          /sys/class/backlight/<backlight>/l1_daylight_max
+What:          /sys/class/backlight/<backlight>/l2_bright_max
+What:          /sys/class/backlight/<backlight>/l3_office_max
+What:          /sys/class/backlight/<backlight>/l4_indoor_max
+What:          /sys/class/backlight/<backlight>/l5_dark_max
+Date:          May 2011
+KernelVersion: 2.6.40
+Contact:       device-drivers-devel@blackfin.uclinux.org
+Description:
+               Control the maximum brightness for <ambient light zone>
+               on this <backlight>. Values are between 0 and 127. This file
+               will also show the brightness level stored for this
+               <ambient light zone>.
+
+What:          /sys/class/backlight/<backlight>/<ambient light zone>_dim
+What:          /sys/class/backlight/<backlight>/l2_bright_dim
+What:          /sys/class/backlight/<backlight>/l3_office_dim
+What:          /sys/class/backlight/<backlight>/l4_indoor_dim
+What:          /sys/class/backlight/<backlight>/l5_dark_dim
+Date:          May 2011
+KernelVersion: 2.6.40
+Contact:       device-drivers-devel@blackfin.uclinux.org
+Description:
+               Control the dim brightness for <ambient light zone>
+               on this <backlight>. Values are between 0 and 127, typically
+               set to 0. Full off when the backlight is disabled.
+               This file will also show the dim brightness level stored for
+               this <ambient light zone>.
+
+What:          /sys/class/backlight/<backlight>/ambient_light_level
+Date:          May 2011
+KernelVersion: 2.6.40
+Contact:       device-drivers-devel@blackfin.uclinux.org
+Description:
+               Get conversion value of the light sensor.
+               This value is updated every 80 ms (when the light sensor
+               is enabled). Returns integer between 0 (dark) and
+               8000 (max ambient brightness)
+
+What:          /sys/class/backlight/<backlight>/ambient_light_zone
+Date:          May 2011
+KernelVersion: 2.6.40
+Contact:       device-drivers-devel@blackfin.uclinux.org
+Description:
+               Get/Set current ambient light zone. Reading returns
+               integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark).
+               Writing a value between 1..5 forces the backlight controller
+               to enter the corresponding ambient light zone.
+               Writing 0 returns to normal/automatic ambient light level
+               operation. The ambient light sensing feature on these devices
+               is an extension to the API documented in
+               Documentation/ABI/stable/sysfs-class-backlight.
+               It can be enabled by writing the value stored in
+               /sys/class/backlight/<backlight>/max_brightness to
+               /sys/class/backlight/<backlight>/brightness.
\ No newline at end of file
diff --git a/Documentation/accounting/cgroupstats.txt b/Documentation/accounting/cgroupstats.txt
index eda40fd39cad9df7927e7409765007af38020219..d16a9849e60e127c2cd234cd02dcc9d6f16d35ec 100644
--- a/Documentation/accounting/cgroupstats.txt
+++ b/Documentation/accounting/cgroupstats.txt
@@ -21,7 +21,7 @@ information will not be available.
 To extract cgroup statistics a utility very similar to getdelays.c
 has been developed, the sample output of the utility is shown below
 
-~/balbir/cgroupstats # ./getdelays  -C "/cgroup/a"
+~/balbir/cgroupstats # ./getdelays  -C "/sys/fs/cgroup/a"
 sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
-~/balbir/cgroupstats # ./getdelays  -C "/cgroup"
+~/balbir/cgroupstats # ./getdelays  -C "/sys/fs/cgroup"
 sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 465351d4cf853e8a308c9c84abef789b3dcfa42c..cd45c8ea7463f71eccee9d82ce6a82b47bcee467 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -28,16 +28,19 @@ cgroups. Here is what you can do.
 - Enable group scheduling in CFQ
        CONFIG_CFQ_GROUP_IOSCHED=y
 
-- Compile and boot into kernel and mount IO controller (blkio).
+- Compile and boot into kernel and mount IO controller (blkio); see
+  cgroups.txt, Why are cgroups needed?.
 
-       mount -t cgroup -o blkio none /cgroup
+       mount -t tmpfs cgroup_root /sys/fs/cgroup
+       mkdir /sys/fs/cgroup/blkio
+       mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Create two cgroups
-       mkdir -p /cgroup/test1/ /cgroup/test2
+       mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
 
 - Set weights of group test1 and test2
-       echo 1000 > /cgroup/test1/blkio.weight
-       echo 500 > /cgroup/test2/blkio.weight
+       echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
+       echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
 
 - Create two same size files (say 512MB each) on same disk (file1, file2) and
   launch two dd threads in different cgroup to read those files.
@@ -46,12 +49,12 @@ cgroups. Here is what you can do.
        echo 3 > /proc/sys/vm/drop_caches
 
        dd if=/mnt/sdb/zerofile1 of=/dev/null &
-       echo $! > /cgroup/test1/tasks
-       cat /cgroup/test1/tasks
+       echo $! > /sys/fs/cgroup/blkio/test1/tasks
+       cat /sys/fs/cgroup/blkio/test1/tasks
 
        dd if=/mnt/sdb/zerofile2 of=/dev/null &
-       echo $! > /cgroup/test2/tasks
-       cat /cgroup/test2/tasks
+       echo $! > /sys/fs/cgroup/blkio/test2/tasks
+       cat /sys/fs/cgroup/blkio/test2/tasks
 
 - At macro level, first dd should finish first. To get more precise data, keep
   on looking at (with the help of script), at blkio.disk_time and
@@ -68,13 +71,13 @@ Throttling/Upper Limit policy
 - Enable throttling in block layer
        CONFIG_BLK_DEV_THROTTLING=y
 
-- Mount blkio controller
-        mount -t cgroup -o blkio none /cgroup/blkio
+- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
+        mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
  for policy is "<major>:<minor>  <bytes_per_second>".
 
-        echo "8:16  1048576" > /cgroup/blkio/blkio.read_bps_device
+        echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
 
   Above will put a limit of 1MB/second on reads happening for root group
   on device having major/minor number 8:16.
@@ -108,7 +111,7 @@ Hierarchical Cgroups
   CFQ and throttling will practically treat all groups at same level.
 
                                pivot
-                            /  |   \  \
+                            /  /   \  \
                        root  test1 test2  test3
 
   Down the line we can implement hierarchical accounting/control support
@@ -149,7 +152,7 @@ Proportional weight policy files
 
          Following is the format.
 
-         #echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+         # echo dev_maj:dev_minor weight > blkio.weight_device
          Configure weight=300 on /dev/sdb (8:16) in this cgroup
          # echo 8:16 300 > blkio.weight_device
          # cat blkio.weight_device
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0ed99f08f1f39256bd71067059627ddc9a0203ac..cd67e90003c0e547952de41086f01d59ecb9a493 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -138,11 +138,11 @@ With the ability to classify tasks differently for different resources
 the admin can easily set up a script which receives exec notifications
 and depending on who is launching the browser he can
 
-       # echo browser_pid > /mnt/<restype>/<userclass>/tasks
+    # echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
 
 With only a single hierarchy, he now would potentially have to create
 a separate cgroup for every browser launched and associate it with
-approp network and other resource class.  This may lead to
+appropriate network and other resource class.  This may lead to
 proliferation of such cgroups.
 
 Also lets say that the administrator would like to give enhanced network
@@ -153,9 +153,9 @@ apps enhanced CPU power,
 With ability to write pids directly to resource classes, it's just a
 matter of :
 
-       # echo pid > /mnt/network/<new_class>/tasks
+       # echo pid > /sys/fs/cgroup/network/<new_class>/tasks
        (after some time)
-       # echo pid > /mnt/network/<orig_class>/tasks
+       # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
 
 Without this ability, he would have to split the cgroup into
 multiple separate ones and then associate the new cgroups with the
@@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
 To start a new job that is to be contained within a cgroup, using
 the "cpuset" cgroup subsystem, the steps are something like:
 
- 1) mkdir /dev/cgroup
- 2) mount -t cgroup -ocpuset cpuset /dev/cgroup
- 3) Create the new cgroup by doing mkdir's and write's (or echo's) in
-    the /dev/cgroup virtual file system.
- 4) Start a task that will be the "founding father" of the new job.
- 5) Attach that task to the new cgroup by writing its pid to the
-    /dev/cgroup tasks file for that cgroup.
- 6) fork, exec or clone the job tasks from this founding father task.
+ 1) mount -t tmpfs cgroup_root /sys/fs/cgroup
+ 2) mkdir /sys/fs/cgroup/cpuset
+ 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+ 4) Create the new cgroup by doing mkdir's and write's (or echo's) in
+    the /sys/fs/cgroup virtual file system.
+ 5) Start a task that will be the "founding father" of the new job.
+ 6) Attach that task to the new cgroup by writing its pid to the
+    /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+ 7) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cgroup
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cgroup:
 
-  mount -t cgroup cpuset -ocpuset /dev/cgroup
-  cd /dev/cgroup
+  mount -t tmpfs cgroup_root /sys/fs/cgroup
+  mkdir /sys/fs/cgroup/cpuset
+  mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
+  cd /sys/fs/cgroup/cpuset
   mkdir Charlie
   cd Charlie
   /bin/echo 2-3 > cpuset.cpus
@@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
 virtual filesystem.
 
 To mount a cgroup hierarchy with all available subsystems, type:
-# mount -t cgroup xxx /dev/cgroup
+# mount -t cgroup xxx /sys/fs/cgroup
 
 The "xxx" is not interpreted by the cgroup code, but will appear in
 /proc/mounts so may be any useful identifying string that you like.
@@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first.  For instance,
 if cpusets are enabled the user will have to populate the cpus and mems files
 for each new cgroup created before that group can be used.
 
+As explained in section `1.2 Why are cgroups needed?' you should create
+different hierarchies of cgroups for each single resource or group of
+resources you want to control. Therefore, you should mount a tmpfs on
+/sys/fs/cgroup and create directories for each cgroup resource or resource
+group.
+
+# mount -t tmpfs cgroup_root /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/rg1
+
 To mount a cgroup hierarchy with just the cpuset and memory
 subsystems, type:
-# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
+# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,blkio hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
 
 Now memory is removed from the hierarchy and blkio is added.
 
 Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,blkio /dev/cgroup
+# mount -o remount,blkio /sys/fs/cgroup/rg1
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
-  xxx /dev/cgroup
+  xxx /sys/fs/cgroup/rg1
 
 Note that specifying 'release_agent' more than once will return failure.
 
@@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
 the ability to arbitrarily bind/unbind subsystems from an existing
 cgroup hierarchy is intended to be implemented in the future.
 
-Then under /dev/cgroup you can find a tree that corresponds to the
-tree of the cgroups in the system. For instance, /dev/cgroup
+Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
+tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
 is the cgroup that holds the whole system.
 
 If you want to change the value of release_agent:
-# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
+# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
 
 It can also be changed via remount.
 
-If you want to create a new cgroup under /dev/cgroup:
-# cd /dev/cgroup
+If you want to create a new cgroup under /sys/fs/cgroup/rg1:
+# cd /sys/fs/cgroup/rg1
 # mkdir my_cgroup
 
 Now you want to do something with this cgroup.
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 8b930946c52a7dec05657470016946b1c3492123..9ad85df4b983e9da4ab967f098b39376c2ac0162 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -10,26 +10,25 @@ directly present in its group.
 
 Accounting groups can be created by first mounting the cgroup filesystem.
 
-# mkdir /cgroups
-# mount -t cgroup -ocpuacct none /cgroups
-
-With the above step, the initial or the parent accounting group
-becomes visible at /cgroups. At bootup, this group includes all the
-tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
-/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
-this group which is essentially the CPU time obtained by all the tasks
+# mount -t cgroup -ocpuacct none /sys/fs/cgroup
+
+With the above step, the initial or the parent accounting group becomes
+visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
+by this group which is essentially the CPU time obtained by all the tasks
 in the system.
 
-New accounting groups can be created under the parent group /cgroups.
+New accounting groups can be created under the parent group /sys/fs/cgroup.
 
-# cd /cgroups
+# cd /sys/fs/cgroup
 # mkdir g1
 # echo $$ > g1
 
 The above steps create a new group g1 and move the current shell
 process (bash) into it. CPU time consumed by this bash and its children
 can be obtained from g1/cpuacct.usage and the same is accumulated in
-/cgroups/cpuacct.usage also.
+/sys/fs/cgroup/cpuacct.usage also.
 
 cpuacct.stat file lists a few statistics which further divide the
 CPU time obtained by the cgroup into user and system times. Currently
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 98a30829af7a1bb1ce74015ef3fa7170a112af27..5b0d78e55cccc98b34989493e3bd40bda3f6fc41 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -661,21 +661,21 @@ than stress the kernel.
 
 To start a new job that is to be contained within a cpuset, the steps are:
 
- 1) mkdir /dev/cpuset
- 2) mount -t cgroup -ocpuset cpuset /dev/cpuset
+ 1) mkdir /sys/fs/cgroup/cpuset
+ 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
  3) Create the new cpuset by doing mkdir's and write's (or echo's) in
-    the /dev/cpuset virtual file system.
+    the /sys/fs/cgroup/cpuset virtual file system.
  4) Start a task that will be the "founding father" of the new job.
  5) Attach that task to the new cpuset by writing its pid to the
-    /dev/cpuset tasks file for that cpuset.
+    /sys/fs/cgroup/cpuset tasks file for that cpuset.
  6) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cpuset
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cpuset:
 
-  mount -t cgroup -ocpuset cpuset /dev/cpuset
-  cd /dev/cpuset
+  mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+  cd /sys/fs/cgroup/cpuset
   mkdir Charlie
   cd Charlie
   /bin/echo 2-3 > cpuset.cpus
@@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
 virtual filesystem.
 
 To mount it, type:
-# mount -t cgroup -o cpuset cpuset /dev/cpuset
+# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
 
-Then under /dev/cpuset you can find a tree that corresponds to the
-tree of the cpusets in the system. For instance, /dev/cpuset
+Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
+tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
 is the cpuset that holds the whole system.
 
-If you want to create a new cpuset under /dev/cpuset:
-# cd /dev/cpuset
+If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
+# cd /sys/fs/cgroup/cpuset
 # mkdir my_cpuset
 
 Now you want to do something with this cpuset.
@@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
 
 The command
 
-mount -t cpuset X /dev/cpuset
+mount -t cpuset X /sys/fs/cgroup/cpuset
 
 is equivalent to
 
-mount -t cgroup -ocpuset,noprefix X /dev/cpuset
-echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
+mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
+echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
 
 2.2 Adding/removing cpus
 ------------------------
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 57ca4c89fe5c089aa6d8b9050fa5b2597e4f62b3..16624a7f82224d2f79d2a7aa2999ebe66c063baa 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -22,16 +22,16 @@ removed from the child(ren).
 An entry is added using devices.allow, and removed using
 devices.deny.  For instance
 
-       echo 'c 1:3 mr' > /cgroups/1/devices.allow
+       echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
 
 allows cgroup 1 to read and mknod the device usually known as
 /dev/null.  Doing
 
-       echo a > /cgroups/1/devices.deny
+       echo a > /sys/fs/cgroup/1/devices.deny
 
 will remove the default 'a *:* rwm' entry. Doing
 
-       echo a > /cgroups/1/devices.allow
+       echo a > /sys/fs/cgroup/1/devices.allow
 
 will add the 'a *:* rwm' entry to the whitelist.
 
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index 41f37fea1276839b80cd4c220af27963f52c6a1d..c21d77742a0799424b09466857681ddcc7100f8b 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -59,28 +59,28 @@ is non-freezable.
 
 * Examples of usage :
 
-   # mkdir /containers
-   # mount -t cgroup -ofreezer freezer  /containers
-   # mkdir /containers/0
-   # echo $some_pid > /containers/0/tasks
+   # mkdir /sys/fs/cgroup/freezer
+   # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
+   # mkdir /sys/fs/cgroup/freezer/0
+   # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
 
 to get status of the freezer subsystem :
 
-   # cat /containers/0/freezer.state
+   # cat /sys/fs/cgroup/freezer/0/freezer.state
    THAWED
 
 to freeze all tasks in the container :
 
-   # echo FROZEN > /containers/0/freezer.state
-   # cat /containers/0/freezer.state
+   # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
+   # cat /sys/fs/cgroup/freezer/0/freezer.state
    FREEZING
-   # cat /containers/0/freezer.state
+   # cat /sys/fs/cgroup/freezer/0/freezer.state
    FROZEN
 
 to unfreeze all tasks in the container :
 
-   # echo THAWED > /containers/0/freezer.state
-   # cat /containers/0/freezer.state
+   # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
+   # cat /sys/fs/cgroup/freezer/0/freezer.state
    THAWED
 
 This is the basic mechanism which should do the right thing for user space task
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 7c163477fcd8f001fb217cc66e4c67812af907a7..06eb6d957c83097b85fd15e87e94b8ed7edfe1cf 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,8 +1,8 @@
 Memory Resource Controller
 
-NOTE: The Memory Resource Controller has been generically been referred
-      to as the memory controller in this document. Do not confuse memory
-      controller used here with the memory controller that is used in hardware.
+NOTE: The Memory Resource Controller has generically been referred to as the
+      memory controller in this document. Do not confuse memory controller
+      used here with the memory controller that is used in hardware.
 
 (For editors)
 In this document:
@@ -70,6 +70,7 @@ Brief summary of control files.
                                 (See sysctl's vm.swappiness)
  memory.move_charge_at_immigrate # set/show controls of moving charges
  memory.oom_control             # set/show oom controls.
+ memory.numa_stat               # show memory usage per numa node
 
 1. History
 
@@ -181,7 +182,7 @@ behind this approach is that a cgroup that aggressively uses a shared
 page will eventually get charged for it (once it is uncharged from
 the cgroup that brought it in -- this will happen on memory pressure).
 
-Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used..
+Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
 When you do swapoff and make swapped-out pages of shmem(tmpfs) to
 be backed into memory in force, charges for pages are accounted against the
 caller of swapoff rather than the users of shmem.
@@ -213,7 +214,7 @@ affecting global LRU, memory+swap limit is better than just limiting swap from
 OS point of view.
 
 * What happens when a cgroup hits memory.memsw.limit_in_bytes
-When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out
+When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
 in this cgroup. Then, swap-out will not be done by cgroup routine and file
 caches are dropped. But as mentioned above, global LRU can do swapout memory
 from it for sanity of the system's memory management state. You can't forbid
@@ -263,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
 c. Enable CONFIG_CGROUP_MEM_RES_CTLR
 d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
 
-1. Prepare the cgroups
-# mkdir -p /cgroups
-# mount -t cgroup none /cgroups -o memory
+1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
+# mount -t tmpfs none /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/memory
+# mount -t cgroup none /sys/fs/cgroup/memory -o memory
 
 2. Make the new group and move bash into it
-# mkdir /cgroups/0
-# echo $$ > /cgroups/0/tasks
+# mkdir /sys/fs/cgroup/memory/0
+# echo $$ > /sys/fs/cgroup/memory/0/tasks
 
 Since now we're in the 0 cgroup, we can alter the memory limit:
-# echo 4M > /cgroups/0/memory.limit_in_bytes
+# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
 mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
@@ -280,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
 NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
 NOTE: We cannot set limits on the root cgroup any more.
 
-# cat /cgroups/0/memory.limit_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
 4194304
 
 We can check the usage:
-# cat /cgroups/0/memory.usage_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
 1216512
 
 A successful write to this file does not guarantee a successful set of
@@ -464,6 +466,24 @@ value for efficient access. (Of course, when necessary, it's synchronized.)
 If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP)
 value in memory.stat(see 5.2).
 
+5.6 numa_stat
+
+This is similar to numa_maps but operates on a per-memcg basis.  This is
+useful for providing visibility into the numa locality information within
+a memcg since the pages are allowed to be allocated from any physical
+node.  One of the use cases is evaluating application performance by
+combining this information with the application's cpu allocation.
+
+We export "total", "file", "anon" and "unevictable" pages per-node for
+each memcg.  The output format of memory.numa_stat is:
+
+total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
+file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
+anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
+unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
+
+And we have total = file + anon + unevictable.
+
 6. Hierarchy support
 
 The memory controller supports a deep hierarchy and hierarchical accounting.
@@ -471,13 +491,13 @@ The hierarchy is created by creating the appropriate cgroups in the
 cgroup filesystem. Consider for example, the following cgroup filesystem
 hierarchy
 
-               root
+              root
             /  |   \
-             |    \
-         a     b       c
-                       | \
-                       |  \
-                       d   e
+            /  |    \
+          a    b     c
+                     | \
+                     |  \
+                     d   e
 
 In the diagram above, with hierarchical accounting enabled, all memory
 usage of e, is accounted to its ancestors up until the root (i.e, c and root),
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 1a9446b59153d8e4dd9f2bc787714a8179eb5bb4..72e238465b0b6ca452ee4905dc097007b9ff3dc0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -481,23 +481,6 @@ Who:       FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
 
 ----------------------------
 
-What:   namespace cgroup (ns_cgroup)
-When:   2.6.38
-Why:    The ns_cgroup leads to some problems:
-       * cgroup creation is out-of-control
-       * cgroup name can conflict when pids are looping
-       * it is not possible to have a single process handling
-       a lot of namespaces without falling in a exponential creation time
-       * we may want to create a namespace without creating a cgroup
-
-       The ns_cgroup is replaced by a compatibility flag 'clone_children',
-       where a newly created cgroup will copy the parent cgroup values.
-       The userspace has to manually create a cgroup and add a task to
-       the 'tasks' file.
-Who:    Daniel Lezcano <daniel.lezcano@free.fr>
-
-----------------------------
-
 What:  iwlwifi disable_hw_scan module parameters
 When:  2.6.40
 Why:   Hardware scan is the preferred method for iwlwifi devices for
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067fd48fc3454806bdaa366c64dd966..db3b1aba32a3f9c0d80ce0cde2d8b6f1943f4dea 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
  TASKLET:          0          0          0        290
    SCHED:      27035      26983      26971      26746
  HRTIMER:          0          0          0          0
+     RCU:       1678       1769       2178       2250
 
 
 1.3 IDE devices in /proc/ide
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d9a203b058f18cf4c99fb98a298c373ed7b162ca..fd248a318211a189163f2c42ba911a44284ea0bc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2598,6 +2598,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                        unlock ejectable media);
                                m = MAX_SECTORS_64 (don't transfer more
                                        than 64 sectors = 32 KB at a time);
+                               n = INITIAL_READ10 (force a retry of the
+                                       initial READ(10) command);
                                o = CAPACITY_OK (accept the capacity
                                        reported by the device);
                                r = IGNORE_RESIDUE (the device reports
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 090e6ee04536285398a81257e1f9a1f9d34e398d..51063e681ca4f8cc0b628d2a46503ad1762aeaf5 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -11,7 +11,9 @@ with the difference that the orphan objects are not freed but only
 reported via /sys/kernel/debug/kmemleak. A similar method is used by the
 Valgrind tool (memcheck --leak-check) to detect the memory leaks in
 user-space applications.
-Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile.
+
+Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported
+architectures.
 
 Usage
 -----
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 2366b1c8cf19492f52480669449ecd2b0a0d0f34..f0eee83ff78a61801e7d0cf293a2090180f208e8 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -555,7 +555,7 @@ also have
    sync_min
    sync_max
      The two values, given as numbers of sectors, indicate a range
-     withing the array where 'check'/'repair' will operate. Must be
+     within the array where 'check'/'repair' will operate. Must be
      a multiple of chunk_size. When it reaches "sync_max" it will
      pause, rather than complete.
      You can use 'select' or 'poll' on "sync_completed" to wait for
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 1b5a5ddbc3ef9a5211f3e7eeb487e524ac6ac0ac..5df176ed59b826e8cbeaca7c96d6ed6d06759fa8 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -9,7 +9,121 @@ If variable is of Type,               use printk format specifier:
                size_t                  %zu or %zx
                ssize_t                 %zd or %zx
 
-Raw pointer value SHOULD be printed with %p.
+Raw pointer value SHOULD be printed with %p. The kernel supports
+the following extended format specifiers for pointer types:
+
+Symbols/Function Pointers:
+
+       %pF     versatile_init+0x0/0x110
+       %pf     versatile_init
+       %pS     versatile_init+0x0/0x110
+       %ps     versatile_init
+       %pB     prev_fn_of_versatile_init+0x88/0x88
+
+       For printing symbols and function pointers. The 'S' and 's' specifiers
+       result in the symbol name with ('S') or without ('s') offsets. Where
+       this is used on a kernel without KALLSYMS - the symbol address is
+       printed instead.
+
+       The 'B' specifier results in the symbol name with offsets and should be
+       used when printing stack backtraces. The specifier takes into
+       consideration the effect of compiler optimisations which may occur
+       when tail-calls are used and marked with the noreturn GCC attribute.
+
+       On ia64, ppc64 and parisc64 architectures function pointers are
+       actually function descriptors which must first be resolved. The 'F' and
+       'f' specifiers perform this resolution and then provide the same
+       functionality as the 'S' and 's' specifiers.
+
+Kernel Pointers:
+
+       %pK     0x01234567 or 0x0123456789abcdef
+
+       For printing kernel pointers which should be hidden from unprivileged
+       users. The behaviour of %pK depends on the kptr_restrict sysctl - see
+       Documentation/sysctl/kernel.txt for more details.
+
+Struct Resources:
+
+       %pr     [mem 0x60000000-0x6fffffff flags 0x2200] or
+               [mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
+       %pR     [mem 0x60000000-0x6fffffff pref] or
+               [mem 0x0000000060000000-0x000000006fffffff pref]
+
+       For printing struct resources. The 'R' and 'r' specifiers result in a
+       printed resource with ('R') or without ('r') a decoded flags member.
+
+MAC/FDDI addresses:
+
+       %pM     00:01:02:03:04:05
+       %pMF    00-01-02-03-04-05
+       %pm     000102030405
+
+       For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
+       specifiers result in a printed address with ('M') or without ('m') byte
+       separators. The default byte separator is the colon (':').
+
+       Where FDDI addresses are concerned the 'F' specifier can be used after
+       the 'M' specifier to use dash ('-') separators instead of the default
+       separator.
+
+IPv4 addresses:
+
+       %pI4    1.2.3.4
+       %pi4    001.002.003.004
+       %p[Ii][hnbl]
+
+       For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
+       specifiers result in a printed address with ('i4') or without ('I4')
+       leading zeros.
+
+       The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
+       host, network, big or little endian order addresses respectively. Where
+       no specifier is provided the default network/big endian order is used.
+
+IPv6 addresses:
+
+       %pI6    0001:0002:0003:0004:0005:0006:0007:0008
+       %pi6    00010002000300040005000600070008
+       %pI6c   1:2:3:4:5:6:7:8
+
+       For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
+       specifiers result in a printed address with ('I6') or without ('i6')
+       colon-separators. Leading zeros are always used.
+
+       The additional 'c' specifier can be used with the 'I' specifier to
+       print a compressed IPv6 address as described by
+       http://tools.ietf.org/html/rfc5952
+
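A minimal sketch, assuming a hypothetical helper taking a struct in6_addr:

        #include <linux/kernel.h>
        #include <linux/in6.h>

        static void example_log_peer6(const struct in6_addr *daddr)
        {
                /* %pI6c: RFC 5952 compressed form; %pI6: full form. */
                pr_info("neighbour %pI6c\n", daddr);
        }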
+UUID/GUID addresses:
+
+       %pUb    00010203-0405-0607-0809-0a0b0c0d0e0f
+       %pUB    00010203-0405-0607-0809-0A0B0C0D0E0F
+       %pUl    03020100-0504-0706-0809-0a0b0c0d0e0f
+       %pUL    03020100-0504-0706-0809-0A0B0C0D0E0F
+
+       For printing 16-byte UUID/GUID addresses. The additional 'l', 'L',
+       'b' and 'B' specifiers are used to specify a little endian order in
+       lower ('l') or upper case ('L') hex characters - and big endian order
+       in lower ('b') or upper case ('B') hex characters.
+
+       Where no additional specifiers are used the default little endian
+       order with lower case hex characters will be printed.
+
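A minimal sketch, assuming a hypothetical 16-byte identifier stored as a plain byte array:

        #include <linux/kernel.h>
        #include <linux/types.h>

        static const u8 example_uuid[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        };

        static void example_log_uuid(void)
        {
                /* Big endian, lower case (the %pUb example shown above). */
                pr_info("volume id %pUb\n", example_uuid);
        }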
+struct va_format:
+
+       %pV
+
+       For printing struct va_format structures. These contain a format string
+       and va_list as follows:
+
+       struct va_format {
+               const char *fmt;
+               va_list *va;
+       };
+
+       Do not use this feature without some mechanism to verify the
+       correctness of the format string and va_list arguments.
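A minimal sketch of the usual pattern (a prefixing wrapper in the style of dev_printk()), assuming a hypothetical helper named example_warn:

        #include <linux/kernel.h>

        static void example_warn(const char *prefix, const char *fmt, ...)
        {
                struct va_format vaf;
                va_list args;

                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                /* The caller's format and arguments are forwarded through a
                 * single %pV instead of being pre-formatted into a buffer. */
                printk(KERN_WARNING "%s: %pV", prefix, &vaf);
                va_end(args);
        }

        /* Usage: example_warn("mmc0", "card removed (%d)\n", err); */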
 
 u64 SHOULD be printed with %llu/%llx, (unsigned long long):
 
@@ -32,4 +146,5 @@ Reminder: sizeof() result is of type size_t.
 Thank you for your cooperation and attention.
 
 
-By Randy Dunlap <rdunlap@xenotime.net>
+By Randy Dunlap <rdunlap@xenotime.net> and
+Andrew Murray <amurray@mpc-data.co.uk>
index 99961993257a9f9cf358ca9641577e4b5b1699ff..91ecff07cede7dfbb4710273bd9824030a805258 100644 (file)
@@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
 group created using the pseudo filesystem.  See example steps below to create
 task groups and modify their CPU share using the "cgroups" pseudo filesystem.
 
-       # mkdir /dev/cpuctl
-       # mount -t cgroup -ocpu none /dev/cpuctl
-       # cd /dev/cpuctl
+       # mount -t tmpfs cgroup_root /sys/fs/cgroup
+       # mkdir /sys/fs/cgroup/cpu
+       # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
+       # cd /sys/fs/cgroup/cpu
 
        # mkdir multimedia      # create "multimedia" group of tasks
        # mkdir browser         # create "browser" group of tasks
index 605b0d40329d843f6c3b838cd4afa5e38438d31e..71b54d54998731ebcfe2c451c15066620b384aaa 100644 (file)
@@ -129,9 +129,8 @@ priority!
 Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
 CPU bandwidth to task groups.
 
-This uses the /cgroup virtual file system and
-"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
-control group.
+This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
+to control the CPU time reserved for each control group.
 
 For more information on working with control groups, you should read
 Documentation/cgroups/cgroups.txt as well.
@@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
 ===============
 
 There is work in progress to make the scheduling period for each group
-("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
+("<cgroup>/cpu.rt_period_us") configurable as well.
 
 The constraint on the period is that a subgroup must have a smaller or
 equal period to its parent. But realistically it's not very useful _yet_
index 12f9ba20ccb7aebd72aab882f9b6d6d7d9b4a135..550068466605b99e8beecca7a1b4a03788f2829a 100644 (file)
@@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
 of the memcg.
 
 Example:
-        mkdir /cgroup/hwpoison
+        mkdir /sys/fs/cgroup/mem/hwpoison
 
         usemem -m 100 -s 1000 &
-        echo `jobs -p` > /cgroup/hwpoison/tasks
+        echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
 
-        memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ')
+        memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
         echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
 
         page-types -p `pidof init`   --hwpoison  # shall do nothing
index 29801f760b6ff59c74a7e94ae05210ee437bfc8e..b12b8c13f53a7e0b6972ad6ad695f59046408727 100644 (file)
@@ -1739,7 +1739,7 @@ S:        Supported
 F:     drivers/net/enic/
 
 CIRRUS LOGIC EP93XX ETHERNET DRIVER
-M:     Lennert Buytenhek <kernel@wantstofly.org>
+M:     Hartley Sweeten <hsweeten@visionengravers.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/arm/ep93xx_eth.c
@@ -1889,7 +1889,6 @@ L:        cpufreq@vger.kernel.org
 W:     http://www.codemonkey.org.uk/projects/cpufreq/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
 S:     Maintained
-F:     arch/x86/kernel/cpu/cpufreq/
 F:     drivers/cpufreq/
 F:     include/linux/cpufreq.h
 
@@ -3820,6 +3819,12 @@ S:       Maintained
 F:     drivers/leds/
 F:     include/linux/leds.h
 
+LEGACY EEPROM DRIVER
+M:     Jean Delvare <khali@linux-fr.org>
+S:     Maintained
+F:     Documentation/misc-devices/eeprom
+F:     drivers/misc/eeprom/eeprom.c
+
 LEGO USB Tower driver
 M:     Juergen Stuber <starblue@users.sourceforge.net>
 L:     legousb-devel@lists.sourceforge.net
@@ -4145,7 +4150,7 @@ F:        include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
-M:     Balbir Singh <balbir@linux.vnet.ibm.com>
+M:     Balbir Singh <bsingharora@gmail.com>
 M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     linux-mm@kvack.org
@@ -4252,8 +4257,7 @@ F:        drivers/mmc/
 F:     include/linux/mmc/
 
 MULTIMEDIA CARD (MMC) ETC. OVER SPI
-M:     David Brownell <dbrownell@users.sourceforge.net>
-S:     Odd Fixes
+S:     Orphan
 F:     drivers/mmc/host/mmc_spi.c
 F:     include/linux/spi/mmc_spi.h
 
@@ -4603,7 +4607,6 @@ F:        drivers/media/video/omap3isp/*
 
 OMAP USB SUPPORT
 M:     Felipe Balbi <balbi@ti.com>
-M:     David Brownell <dbrownell@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -4892,7 +4895,7 @@ F:        mm/percpu*.c
 F:     arch/*/include/asm/percpu.h
 
 PER-TASK DELAY ACCOUNTING
-M:     Balbir Singh <balbir@linux.vnet.ibm.com>
+M:     Balbir Singh <bsingharora@gmail.com>
 S:     Maintained
 F:     include/linux/delayacct.h
 F:     kernel/delayacct.c
@@ -4947,6 +4950,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.gi
 F:     drivers/input/serio/i8042-unicore32io.h
 F:     drivers/i2c/busses/i2c-puv3.c
 F:     drivers/video/fb-puv3.c
+F:     drivers/rtc/rtc-puv3.c
 
 PMC SIERRA MaxRAID DRIVER
 M:     Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
@@ -5984,7 +5988,6 @@ F:        Documentation/serial/specialix.txt
 F:     drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
-M:     David Brownell <dbrownell@users.sourceforge.net>
 M:     Grant Likely <grant.likely@secretlab.ca>
 L:     spi-devel-general@lists.sourceforge.net
 Q:     http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -6100,7 +6103,7 @@ F:        include/target/
 F:     Documentation/target/
 
 TASKSTATS STATISTICS INTERFACE
-M:     Balbir Singh <balbir@linux.vnet.ibm.com>
+M:     Balbir Singh <bsingharora@gmail.com>
 S:     Maintained
 F:     Documentation/accounting/taskstats*
 F:     include/linux/taskstats*
@@ -6432,9 +6435,8 @@ S:        Maintained
 F:     drivers/usb/misc/rio500*
 
 USB EHCI DRIVER
-M:     David Brownell <dbrownell@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
-S:     Odd Fixes
+S:     Orphan
 F:     Documentation/usb/ehci.txt
 F:     drivers/usb/host/ehci*
 
@@ -6448,9 +6450,10 @@ S:       Maintained
 F:     drivers/media/video/et61x251/
 
 USB GADGET/PERIPHERAL SUBSYSTEM
-M:     David Brownell <dbrownell@users.sourceforge.net>
+M:     Felipe Balbi <balbi@ti.com>
 L:     linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org/gadget
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
 F:     drivers/usb/gadget/
 F:     include/linux/usb/gadget*
@@ -6460,7 +6463,7 @@ M:        Jiri Kosina <jkosina@suse.cz>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
-F:     Documentation/usb/hiddev.txt
+F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
 USB ISP116X DRIVER
@@ -6492,9 +6495,8 @@ S:        Maintained
 F:     sound/usb/midi.*
 
 USB OHCI DRIVER
-M:     David Brownell <dbrownell@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
-S:     Odd Fixes
+S:     Orphan
 F:     Documentation/usb/ohci.txt
 F:     drivers/usb/host/ohci*
 
@@ -6720,6 +6722,14 @@ S:       Maintained
 F:     Documentation/filesystems/vfat.txt
 F:     fs/fat/
 
+VIDEOBUF2 FRAMEWORK
+M:     Pawel Osciak <pawel@osciak.com>
+M:     Marek Szyprowski <m.szyprowski@samsung.com>
+L:     linux-media@vger.kernel.org
+S:     Maintained
+F:     drivers/media/video/videobuf2-*
+F:     include/media/videobuf2-*
+
 VIRTIO CONSOLE DRIVER
 M:     Amit Shah <amit.shah@redhat.com>
 L:     virtualization@lists.linux-foundation.org
@@ -6997,6 +7007,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:     Maintained
 F:     drivers/platform/x86
 
+X86 MCE INFRASTRUCTURE
+M:     Tony Luck <tony.luck@intel.com>
+M:     Borislav Petkov <bp@amd64.org>
+L:     linux-edac@vger.kernel.org
+S:     Maintained
+F:     arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M:     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
index afb8e0d26f2c6134828d5d63e557799e8ef6e199..0499c2ee8541f7dfde385a74ea27270be75e45a0 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
@@ -378,7 +378,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
-KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
+KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
@@ -1005,7 +1005,7 @@ endef
 
 define filechk_version.h
        (echo \#define LINUX_VERSION_CODE $(shell                             \
-       expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL));     \
+       expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL));    \
        echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
 endef
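For reference, LINUX_VERSION_CODE packs the three numbers as (VERSION << 16) + (PATCHLEVEL << 8) + SUBLEVEL, so 3.0.0 becomes 0x030000 (196608); the leading zeros added above keep the expr arithmetic valid when PATCHLEVEL or SUBLEVEL is empty. A hedged sketch of how out-of-tree code typically consumes the result:

        #include <linux/version.h>

        #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
        /* build against the 3.x interface */
        #else
        /* fall back to the 2.6.x interface */
        #endif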
 
@@ -1110,11 +1110,6 @@ modules_install: _modinst_ _modinst_post
 
 PHONY += _modinst_
 _modinst_:
-       @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
-               echo "Warning: you may need to install module-init-tools"; \
-               echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
-               sleep 1; \
-       fi
        @rm -rf $(MODLIB)/kernel
        @rm -f $(MODLIB)/source
        @mkdir -p $(MODLIB)/kernel
@@ -1531,12 +1526,8 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files))
 
 # Run depmod only if we have System.map and depmod is executable
 quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
-      cmd_depmod = \
-       if [ -r System.map -a -x $(DEPMOD) ]; then                              \
-               $(DEPMOD) -ae -F System.map                                     \
-               $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) )     \
-               $(KERNELRELEASE);                                               \
-       fi
+      cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
+                   $(KERNELRELEASE)
 
 # Create temporary dir for module support files
 # clean it up only when building all modules
index 376f2213079190f65196b0e3f6554f0f55834183..326f0a2d56e52922920aad724dfec0e78487986a 100644 (file)
@@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
                return -EFAULT;
 
        len = namelen;
-       if (namelen > 32)
+       if (len > 32)
                len = 32;
 
        down_read(&uts_sem);
@@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
        down_read(&uts_sem);
        res = sysinfo_table[offset];
        len = strlen(res)+1;
-       if (len > count)
+       if ((unsigned long)len > (unsigned long)count)
                len = count;
        if (copy_to_user(buf, res, len))
                err = -EFAULT;
@@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
                return 1;
 
        case GSI_GET_HWRPB:
-               if (nbytes < sizeof(*hwrpb))
+               if (nbytes > sizeof(*hwrpb))
                        return -EINVAL;
                if (copy_to_user(buffer, hwrpb, nbytes) != 0)
                        return -EFAULT;
@@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 {
        struct rusage r;
        long ret, err;
+       unsigned int status = 0;
        mm_segment_t old_fs;
 
        if (!ur)
@@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
        old_fs = get_fs();
                
        set_fs (KERNEL_DS);
-       ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
+       ret = sys_wait4(pid, (unsigned int __user *) &status, options,
+                       (struct rusage __user *) &r);
        set_fs (old_fs);
 
        if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
                return -EFAULT;
 
        err = 0;
+       err |= put_user(status, ustatus);
        err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
        err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
        err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
index f9da41921c521e216f07816eb5cab99c041f4845..942fad97e4472c8e27b98068efaafc1cc518787e 100644 (file)
@@ -691,9 +691,9 @@ proc_types:
 
                .word   0x41069260              @ ARM926EJ-S (v5TEJ)
                .word   0xff0ffff0
-               b       __arm926ejs_mmu_cache_on
-               b       __armv4_mmu_cache_off
-               b       __armv5tej_mmu_cache_flush
+               W(b)    __arm926ejs_mmu_cache_on
+               W(b)    __armv4_mmu_cache_off
+               W(b)    __armv5tej_mmu_cache_flush
 
                .word   0x00007000              @ ARM7 IDs
                .word   0x0000f000
index 889922ad229c31e8419c7f211cb5ef481e4d612a..67b5abb6f8576e3fa47d248ecea51facf7f7b5dc 100644 (file)
@@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=m
index 316af5479d90b0cec88b479d70f5991155d5497e..9c0ad7993986f340952c2a74352e3d24e75c1a01 100644 (file)
@@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
index 8b0c717378faa6c9596b9c4b22a4d9dea1b9da65..1d01ddd33122d76fa7db12e989ed0ac09e2293d7 100644 (file)
@@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_G_PRINTER=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=m
 CONFIG_RTC_DRV_SA1100=m
 CONFIG_EXT2_FS=m
index 5b5504143647fe23fafb93e0a7df67b9b5b7f8c3..721832ffe2d728476ff5395333d23795efcfba8a 100644 (file)
@@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SA1100=m
 CONFIG_DMADEVICES=y
 # CONFIG_DNOTIFY is not set
index 960f65514d88161389798236bb26ecd4c704b0cd..59577ad3f4efdfda65e65753054673a32614e80f 100644 (file)
@@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_LEDS_TRIGGER_BACKLIGHT=m
 CONFIG_LEDS_TRIGGER_GPIO=m
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_ISL1208=m
 CONFIG_RTC_DRV_PXA=m
 CONFIG_EXT2_FS=y
index a701e4226a6c85f804ce13a627de3d6d82127464..0cdd7b456cb2a072ce3902f2a6d7934967b083d7 100644 (file)
@@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        unsigned long dt_root;
        const char *model;
 
+       if (!dt_phys)
+               return NULL;
+
        devtree = phys_to_virt(dt_phys);
 
        /* check device tree validity */
index e8d88567680718167cf2fe389dbe4af3e0155887..90c62cd51ca9ffc891f7c95336b1c2017097d512 100644 (file)
@@ -435,6 +435,10 @@ __irq_usr:
        usr_entry
        kuser_cmpxchg_check
 
+#ifdef CONFIG_IRQSOFF_TRACER
+       bl      trace_hardirqs_off
+#endif
+
        get_thread_info tsk
 #ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
@@ -453,7 +457,7 @@ __irq_usr:
 #endif
 
        mov     why, #0
-       b       ret_to_user
+       b       ret_to_user_from_irq
  UNWIND(.fnend         )
 ENDPROC(__irq_usr)
 
index 1e7b04a40a3164f7226ce82907012acb231fec45..b2a27b6b0046ee6c1e0f0928c3269ab5f6c63cd2 100644 (file)
@@ -64,6 +64,7 @@ work_resched:
 ENTRY(ret_to_user)
 ret_slow_syscall:
        disable_irq                             @ disable interrupts
+ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
@@ -75,6 +76,7 @@ no_work_pending:
        arch_ret_to_user r1, lr
 
        restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user_from_irq)
 ENDPROC(ret_to_user)
 
 /*
index d52eec268b4746a006ef3badf6805b8acae5d7e1..6807cb1e76ddb119f78100aa3ccc7d59b1184721 100644 (file)
@@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
        fs = get_fs();
        set_fs(KERNEL_DS);
 
-       for (i = -4; i < 1; i++) {
+       for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;
 
                if (thumb)
@@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-               if (!pte_present(*pte) || !pte_dirty(*pte)) {
+               if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }
index 4e66881c7aee748dddf82a304fb261dc89edde97..fc4e98ea7543a397e4ece003b3ff9f9d498fd42f 100644 (file)
@@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = {
        .resource       = da850_mcasp_resources,
 };
 
-struct platform_device davinci_pcm_device = {
+static struct platform_device davinci_pcm_device = {
        .name   = "davinci-pcm-audio",
        .id     = -1,
 };
index 8f4f736aa267104455fdf2f1502e467e63bd9aad..806a2f02b9808abf870ac9b9b0e1c77d99e60bb4 100644 (file)
@@ -298,7 +298,7 @@ static void davinci_init_wdt(void)
 
 /*-------------------------------------------------------------------------*/
 
-struct platform_device davinci_pcm_device = {
+static struct platform_device davinci_pcm_device = {
        .name           = "davinci-pcm-audio",
        .id             = -1,
 };
index a0b838894ac99b27daaf413fbfbd8f78ae842066..e7221398e5af9c751d1dc8f85c1d294b2afb0f9b 100644 (file)
@@ -252,9 +252,11 @@ static struct irq_chip gpio_irqchip = {
 static void
 gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-       struct davinci_gpio_regs __iomem *g = irq2regs(irq);
+       struct davinci_gpio_regs __iomem *g;
        u32 mask = 0xffff;
 
+       g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
+
        /* we only care about one bank */
        if (irq & 1)
                mask <<= 16;
@@ -422,8 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
 
                /* set up all irqs in this bank */
                irq_set_chained_handler(bank_irq, gpio_irq_handler);
-               irq_set_chip_data(bank_irq, (__force void *)g);
-               irq_set_handler_data(bank_irq, (void *)irq);
+               irq_set_handler_data(bank_irq, (__force void *)g);
 
                for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
                        irq_set_chip(irq, &gpio_irqchip);
index 82079545adc4e8b788e98509cc8e77c7a5e95508..1d4b65fd673eb23e9d2ac2fd9dbded0f2e8ba6d9 100644 (file)
@@ -402,11 +402,15 @@ static struct resource ep93xx_eth_resource[] = {
        }
 };
 
+static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device ep93xx_eth_device = {
        .name           = "ep93xx-eth",
        .id             = -1,
        .dev            = {
-               .platform_data  = &ep93xx_eth_data,
+               .platform_data          = &ep93xx_eth_data,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .dma_mask               = &ep93xx_eth_dma_mask,
        },
        .num_resources  = ARRAY_SIZE(ep93xx_eth_resource),
        .resource       = ep93xx_eth_resource,
index b92c1e557145d5184df2a3eb2c4d908ff3079c25..1435fc31c4b29e55e2209050f0f51eab21fbb390 100644 (file)
@@ -91,6 +91,11 @@ config EXYNOS4_SETUP_FIMC
        help
          Common setup code for the camera interfaces.
 
+config EXYNOS4_SETUP_USB_PHY
+       bool
+       help
+         Common setup code for USB PHY controller
+
 # machine support
 
 menu "EXYNOS4 Machines"
@@ -176,6 +181,7 @@ config MACH_NURI
        select EXYNOS4_SETUP_I2C3
        select EXYNOS4_SETUP_I2C5
        select EXYNOS4_SETUP_SDHCI
+       select EXYNOS4_SETUP_USB_PHY
        select SAMSUNG_DEV_PWM
        help
          Machine support for Samsung Mobile NURI Board.
index a9bb94fabaa71c3ca31e823d9ad87aa409f0bfb2..60fe5ecf359963ec365abb7f06b4a160b65cd2cb 100644 (file)
@@ -56,4 +56,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD)    += setup-keypad.o
 obj-$(CONFIG_EXYNOS4_SETUP_SDHCI)      += setup-sdhci.o
 obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
 
-obj-$(CONFIG_USB_SUPPORT)              += usb-phy.o
+obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY)    += setup-usb-phy.o
index 08813a6f66b13f9df62f570524f3de592c19a022..9babe4473e8893614e2cddef1ebfd1855f9e94c2 100644 (file)
@@ -98,7 +98,7 @@ static struct map_desc exynos4_iodesc[] __initdata = {
                .length         = SZ_4K,
                .type           = MT_DEVICE,
        }, {
-               .virtual        = (unsigned long)S5P_VA_USB_HSPHY,
+               .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
index 703118d5173c0e4125f47305d946f2603474add5..c337cf3a71bf35e39bf4780f481a36a1c54db9aa 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef __PLAT_S5P_REGS_USB_PHY_H
 #define __PLAT_S5P_REGS_USB_PHY_H
 
-#define EXYNOS4_HSOTG_PHYREG(x)                ((x) + S5P_VA_USB_HSPHY)
+#define EXYNOS4_HSOTG_PHYREG(x)                ((x) + S3C_VA_USB_HSPHY)
 
 #define EXYNOS4_PHYPWR                 EXYNOS4_HSOTG_PHYREG(0x00)
 #define PHY1_HSIC_NORMAL_MASK          (0xf << 9)
index 86b9fa0d3639e1d69278e22a72ee3d188718a14a..ebb8f38d54059dbadb2cc32679d91fce3dbd348c 100644 (file)
@@ -206,6 +206,7 @@ static cycle_t exynos4_pwm4_read(struct clocksource *cs)
        return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40));
 }
 
+#ifdef CONFIG_PM
 static void exynos4_pwm4_resume(struct clocksource *cs)
 {
        unsigned long pclk;
@@ -218,6 +219,7 @@ static void exynos4_pwm4_resume(struct clocksource *cs)
        exynos4_pwm_init(4, ~0);
        exynos4_pwm_start(4, 1);
 }
+#endif
 
 struct clocksource pwm_clocksource = {
        .name           = "pwm_timer4",
index 5f1f9867fc70eedff4620119fead758eb7c0e2b2..121ad1d4fa39f9a0d4f70c6cbd23021f4e62617f 100644 (file)
@@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void)
        clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
        ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
        ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
+       ce->cpumask = cpumask_of(smp_processor_id());
 
        clockevents_register_device(ce);
 }
index 30b971d65815f342069719afca2052769ac99ba9..1be2eeb7a0a042198a73ec78a3b33278c25e533b 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/hardware/debug-8250.S>
 
 #else
+#include <mach/hardware.h>
        /* For EBSA285 debugging */
                .equ    dc21285_high, ARMCSR_BASE & 0xff000000
                .equ    dc21285_low,  ARMCSR_BASE & 0x00ffffff
@@ -36,8 +37,8 @@
                .else
                mov     \rp, #0
                .endif
-               orr     \rv, \rp, #0x42000000
-               orr     \rp, \rp, #dc21285_high
+               orr     \rv, \rp, #dc21285_high
+               orr     \rp, \rp, #0x42000000
                .endm
 
                .macro  senduart,rd,rx
index 65157a35dbba351d6751bdcb4ee483e37b25ee88..54add60f94c98c5d8d15c119e996395a13b6fc86 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 
+#include <asm/processor.h>     /* for cpu_relax() */
+
 #include <mach/mxs.h>
 
 #define OCOTP_WORD_OFFSET              0x20
index af98117043d214f8e088e8fd1429656410300a14..5b114d1558c83f41fe8acc524b1aa86f2ffa4a54 100644 (file)
@@ -4,14 +4,14 @@
 
 # Common support
 obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o reset.o
+obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
 obj-$(CONFIG_OMAP_32K_TIMER)   += timer32k.o
 
 # Power Management
-obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
+obj-$(CONFIG_PM) += pm.o sleep.o
 
 # DSP
 obj-$(CONFIG_OMAP_MBOX_FWK)    += mailbox_mach.o
index d8559344c6e2927581d19663de05e0b23c10c849..f5a52204b89fa8e85908ab8f2566594575324027 100644 (file)
@@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void)
        dma_base = ioremap(res[0].start, resource_size(&res[0]));
        if (!dma_base) {
                pr_err("%s: Unable to ioremap\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto exit_device_put;
        }
 
        ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
        if (ret) {
                dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
                        __func__, pdev->name, pdev->id);
-               goto exit_device_del;
+               goto exit_device_put;
        }
 
        p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
@@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void)
                dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
                        __func__, pdev->name);
                ret = -ENOMEM;
-               goto exit_device_put;
+               goto exit_device_del;
        }
 
        d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
@@ -380,10 +381,10 @@ exit_release_d:
        kfree(d);
 exit_release_p:
        kfree(p);
-exit_device_put:
-       platform_device_put(pdev);
 exit_device_del:
        platform_device_del(pdev);
+exit_device_put:
+       platform_device_put(pdev);
 
        return ret;
 }
index fe31d933f0edee78106d52d0c82fb95ef70a3a58..334fb8871bc319348f9edb152e5d611fb539aa6f 100644 (file)
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
+#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#else
+#define OMAP1_PWR_DOMAIN NULL
+#endif /* CONFIG_PM_RUNTIME */
 
 static struct pm_clk_notifier_block platform_bus_notifier = {
-       .pwr_domain = &default_power_domain,
+       .pwr_domain = OMAP1_PWR_DOMAIN,
        .con_ids = { "ick", "fck", NULL, },
 };
 
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
        return 0;
 }
 core_initcall(omap1_pm_runtime_init);
-#endif /* CONFIG_PM_RUNTIME */
+
index d54969be0a54e90535a3578f48a85c31144f0b9b..5de6eac0a72520a8b37e7a9d5cde73c3de40b1c5 100644 (file)
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#include <mach/gpio.h>
 #include <plat/board.h>
 #include <plat/common.h>
 #include <plat/gpmc.h>
index ae2963a98041711a091a473cc1a66283a851397c..5dac974be6256bd4a22a4fc570142d56bee90efa 100644 (file)
@@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = {
                         OMAP_MUX_MODE0),
 };
 
-static struct omap_board_data serial1_data = {
+static struct omap_board_data serial1_data __initdata = {
        .id             = 0,
        .pads           = serial1_pads,
        .pads_cnt       = ARRAY_SIZE(serial1_pads),
 };
 
-static struct omap_board_data serial2_data = {
+static struct omap_board_data serial2_data __initdata = {
        .id             = 1,
        .pads           = serial2_pads,
        .pads_cnt       = ARRAY_SIZE(serial2_pads),
 };
 
-static struct omap_board_data serial3_data = {
+static struct omap_board_data serial3_data __initdata = {
        .id             = 2,
        .pads           = serial3_pads,
        .pads_cnt       = ARRAY_SIZE(serial3_pads),
index 73fa90bb6953dcffb47119328a0aa0f059ae6164..63de2d396e2dddf84eaec2b6035aad64aba49385 100644 (file)
@@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = {
        { ETH_KS8851_IRQ,       GPIOF_IN,               "eth_irq"       },
 };
 
-static int omap_ethernet_init(void)
+static int __init omap_ethernet_init(void)
 {
        int status;
 
@@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = {
                .gpio_wp        = -EINVAL,
                .nonremovable   = true,
                .ocr_mask       = MMC_VDD_29_30,
+               .no_off_init    = true,
        },
        {
                .mmc            = 1,
@@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
                         OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
 };
 
-static struct omap_board_data serial2_data = {
+static struct omap_board_data serial2_data __initdata = {
        .id             = 1,
        .pads           = serial2_pads,
        .pads_cnt       = ARRAY_SIZE(serial2_pads),
 };
 
-static struct omap_board_data serial3_data = {
+static struct omap_board_data serial3_data __initdata = {
        .id             = 2,
        .pads           = serial3_pads,
        .pads_cnt       = ARRAY_SIZE(serial3_pads),
 };
 
-static struct omap_board_data serial4_data = {
+static struct omap_board_data serial4_data __initdata = {
        .id             = 3,
        .pads           = serial4_pads,
        .pads_cnt       = ARRAY_SIZE(serial4_pads),
@@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void)
 
        if (omap_rev() == OMAP4430_REV_ES1_0)
                package = OMAP_PACKAGE_CBL;
-       omap4_mux_init(board_mux, package);
+       omap4_mux_init(board_mux, NULL, package);
 
        omap_board_config = sdp4430_config;
        omap_board_config_size = ARRAY_SIZE(sdp4430_config);
index f3beb8eeef77a2a2cc1c2d229f0d17420041460a..b124bdfb4239ebfcf42c8ca5ab011322cdec3182 100644 (file)
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/smc91x.h>
+#include <linux/gpio.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 
-#include <mach/gpio.h>
 #include <plat/led.h>
 #include <plat/usb.h>
 #include <plat/board.h>
index c63115bc15368d7e953b3b784562f4a22c612ad2..77456dec93ea9640c60a7b48f7dcf3db4b68e568 100644 (file)
@@ -63,8 +63,6 @@
 #define SB_T35_SMSC911X_CS     4
 #define SB_T35_SMSC911X_GPIO   65
 
-#define NAND_BLOCK_SIZE                SZ_128K
-
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 #include <linux/smsc911x.h>
 #include <plat/gpmc-smsc911x.h>
index 08f08e812492920e24cb79d8f04eec5d3cc88659..c3a9fd35034a3b40ad33df50e2573c7f5a000cbb 100644 (file)
@@ -48,6 +48,7 @@
 
 #include "mux.h"
 #include "control.h"
+#include "common-board-devices.h"
 
 #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
 static struct gpio_led cm_t3517_leds[] = {
@@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
        .reset_gpio_port[2]  = -EINVAL,
 };
 
-static int cm_t3517_init_usbh(void)
+static int __init cm_t3517_init_usbh(void)
 {
        int err;
 
@@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void)
 #endif
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
-#define NAND_BLOCK_SIZE                SZ_128K
-
 static struct mtd_partition cm_t3517_nand_partitions[] = {
        {
                .name           = "xloader",
index cf520d7dd614a999879ebdb64ff1bda2c6d2ba44..34956ec832960f1e215241f196ef2d89b3006d91 100644 (file)
@@ -61,8 +61,6 @@
 #include "timer-gp.h"
 #include "common-board-devices.h"
 
-#define NAND_BLOCK_SIZE                SZ_128K
-
 #define OMAP_DM9000_GPIO_IRQ   25
 #define OMAP3_DEVKIT_TS_GPIO   27
 
index be71426359f2ecda9644f41a7e6cd6e64ab6e58b..7f21d24bd437732724a45af9302c50a514f56482 100644 (file)
@@ -54,8 +54,6 @@
 #include "pm.h"
 #include "common-board-devices.h"
 
-#define NAND_BLOCK_SIZE                SZ_128K
-
 /*
  * OMAP3 Beagle revision
  * Run time detection of Beagle revision is done by reading GPIO.
@@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void)
        beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
                        | (gpio_get_value(173) << 2);
 
+       gpio_free_array(omap3_beagle_rev_gpios,
+                       ARRAY_SIZE(omap3_beagle_rev_gpios));
+
        switch (beagle_rev) {
        case 7:
                printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
@@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void)
        omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
                             ARRAY_SIZE(omap3beagle_nand_partitions));
 
+       /* Ensure msecure is mux'd to be able to set the RTC. */
+       omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
+
        /* Ensure SDRC pins are mux'd for self-refresh */
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
        omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
index 1d10736c6d3c1d358cbef88468294cb7d4f1834b..23f71d40883ea1fafbd2fd331d33fd9c581731dd 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/input/matrix_keypad.h>
+#include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
@@ -41,7 +42,6 @@
 
 #include <plat/board.h>
 #include <plat/common.h>
-#include <mach/gpio.h>
 #include <mach/hardware.h>
 #include <plat/mcspi.h>
 #include <plat/usb.h>
@@ -57,8 +57,6 @@
 #define PANDORA_WIFI_NRESET_GPIO       23
 #define OMAP3_PANDORA_TS_GPIO          94
 
-#define NAND_BLOCK_SIZE                        SZ_128K
-
 static struct mtd_partition omap3pandora_nand_partitions[] = {
        {
                .name           = "xloader",
@@ -86,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
 
 static struct omap_nand_platform_data pandora_nand_data = {
        .cs             = 0,
-       .devsize        = 1,    /* '0' for 8-bit, '1' for 16-bit device */
+       .devsize        = NAND_BUSWIDTH_16,
+       .xfer_type      = NAND_OMAP_PREFETCH_DMA,
        .parts          = omap3pandora_nand_partitions,
        .nr_parts       = ARRAY_SIZE(omap3pandora_nand_partitions),
 };
index 82872d7d313b7dd624bf5f39c0a819e61bf4bf9e..5f649faf7377ecb757c49c73e9430d91adf2940f 100644 (file)
@@ -56,8 +56,6 @@
 
 #include <asm/setup.h>
 
-#define NAND_BLOCK_SIZE                SZ_128K
-
 #define OMAP3_AC_GPIO          136
 #define OMAP3_TS_GPIO          162
 #define TB_BL_PWM_TIMER                9
index 90485fced973db9ee265465eac4c474a1adf3848..0cfe2005cb506a32c79d96f9864bb7e24bd35a0f 100644 (file)
@@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
                         OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
 };
 
-static struct omap_board_data serial2_data = {
+static struct omap_board_data serial2_data __initdata = {
        .id             = 1,
        .pads           = serial2_pads,
        .pads_cnt       = ARRAY_SIZE(serial2_pads),
 };
 
-static struct omap_board_data serial3_data = {
+static struct omap_board_data serial3_data __initdata = {
        .id             = 2,
        .pads           = serial3_pads,
        .pads_cnt       = ARRAY_SIZE(serial3_pads),
 };
 
-static struct omap_board_data serial4_data = {
+static struct omap_board_data serial4_data __initdata = {
        .id             = 3,
        .pads           = serial4_pads,
        .pads_cnt       = ARRAY_SIZE(serial4_pads),
@@ -687,7 +687,7 @@ static void __init omap4_panda_init(void)
 
        if (omap_rev() == OMAP4430_REV_ES1_0)
                package = OMAP_PACKAGE_CBL;
-       omap4_mux_init(board_mux, package);
+       omap4_mux_init(board_mux, NULL, package);
 
        if (wl12xx_set_platform_data(&omap_panda_wlan_data))
                pr_err("error setting wl12xx data\n");
index 1555918e3ffa05c0a79e501e24cf6041b4cca30c..175e1ab2b04d7225a0a13485e12e239e9692030d 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/i2c/twl.h>
@@ -45,7 +46,6 @@
 #include <plat/common.h>
 #include <video/omapdss.h>
 #include <video/omap-panel-generic-dpi.h>
-#include <mach/gpio.h>
 #include <plat/gpmc.h>
 #include <mach/hardware.h>
 #include <plat/nand.h>
@@ -65,8 +65,6 @@
 #define OVERO_GPIO_USBH_CPEN   168
 #define OVERO_GPIO_USBH_NRESET 183
 
-#define NAND_BLOCK_SIZE SZ_128K
-
 #define OVERO_SMSC911X_CS      5
 #define OVERO_SMSC911X_GPIO    176
 #define OVERO_SMSC911X2_CS     4
index f6247e71a194419063064c906f690655bf83fceb..990366726c58f2c2614e43536dc28c5f36b70cbf 100644 (file)
@@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = {
                .name                   = "V28_A",
                .min_uV                 = 2800000,
                .max_uV                 = 3000000,
+               .always_on              = true, /* due VIO leak to AIC34 VDDs */
                .apply_uV               = true,
                .valid_modes_mask       = REGULATOR_MODE_NORMAL
                                        | REGULATOR_MODE_STANDBY,
@@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
 {
        /* FIXME this gpio setup is just a placeholder for now */
        gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
-       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en");
+       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
 
        return 0;
 }
index c7c6beb1ec24743737c5b9abe99a85909d5e310b..d4683ba5f72182aa47ef94267ff9dc97c446b043 100644 (file)
@@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = {
        { LCD_PANEL_QVGA_GPIO,  GPIOF_OUT_INIT_HIGH, "lcd qvga"  },
 };
 
-static void zoom_lcd_panel_init(void)
+static void __init zoom_lcd_panel_init(void)
 {
        zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
                        LCD_PANEL_RESET_GPIO_PROD :
index e94903b2c65bb0f4d0a1cc1fecb5c8512ff9bdeb..94ccf464677b78eeb831671f54f862325881a0ac 100644 (file)
@@ -85,17 +85,17 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
        struct spi_board_info *spi_bi = &ads7846_spi_board_info;
        int err;
 
-       err = gpio_request(gpio_pendown, "TS PenDown");
-       if (err) {
-               pr_err("Could not obtain gpio for TS PenDown: %d\n", err);
-               return;
-       }
-
-       gpio_direction_input(gpio_pendown);
-       gpio_export(gpio_pendown, 0);
+       if (board_pdata && board_pdata->get_pendown_state) {
+               err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+               if (err) {
+                       pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+                       return;
+               }
+               gpio_export(gpio_pendown, 0);
 
-       if (gpio_debounce)
-               gpio_set_debounce(gpio_pendown, gpio_debounce);
+               if (gpio_debounce)
+                       gpio_set_debounce(gpio_pendown, gpio_debounce);
+       }
 
        ads7846_config.gpio_pendown = gpio_pendown;
 
index eb80b3b0ef478678d4dd914749d7632780f664cd..679719051df5bdfa70569ead1291e9206604ce6a 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __OMAP_COMMON_BOARD_DEVICES__
 #define __OMAP_COMMON_BOARD_DEVICES__
 
+#define NAND_BLOCK_SIZE        SZ_128K
+
 struct twl4030_platform_data;
 struct mtd_partition;
 
index 7b855856459143a6cfaa8c20ab597d661c5213a5..5b8ca680ed93d2e9f919548cca05cb656f4ee787 100644 (file)
@@ -97,7 +97,7 @@ static int __init omap4_l3_init(void)
 
        WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name);
 
-       return PTR_ERR(od);
+       return IS_ERR(od) ? PTR_ERR(od) : 0;
 }
 postcore_initcall(omap4_l3_init);
 
index b2f30bed5a2041d5e27a7b3ce24a172eb8d966e1..66868c5d5a29b8207da355ef7163e941296f615c 100644 (file)
@@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
                                 int power_on, int vdd)
 {
        u32 reg;
+       unsigned long timeout;
 
        if (power_on) {
                reg = omap4_ctrl_pad_readl(control_pbias_offset);
@@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
                        OMAP4_MMC1_PWRDNZ_MASK |
                        OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
                omap4_ctrl_pad_writel(reg, control_pbias_offset);
-               /* 4 microsec delay for comparator to generate an error*/
-               udelay(4);
-               reg = omap4_ctrl_pad_readl(control_pbias_offset);
+
+               timeout = jiffies + msecs_to_jiffies(5);
+               do {
+                       reg = omap4_ctrl_pad_readl(control_pbias_offset);
+                       if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK))
+                               break;
+                       usleep_range(100, 200);
+               } while (!time_after(jiffies, timeout));
+
                if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
                        pr_err("Pbias Voltage is not same as LDO\n");
                        /* Caution : On VMODE_ERROR Power Down MMC IO */
@@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
        if (c->no_off)
                mmc->slots[0].no_off = 1;
 
+       if (c->no_off_init)
+               mmc->slots[0].no_regulator_off_init = c->no_off_init;
+
        if (c->vcc_aux_disable_is_sleep)
                mmc->slots[0].vcc_aux_disable_is_sleep = 1;
 
index f119348827d46d029c3cc7544ca1293420b7156c..f757e78d4d4f5759f73bdc27acc502c1ef8f497f 100644 (file)
@@ -18,6 +18,7 @@ struct omap2_hsmmc_info {
        bool    nonremovable;   /* Nonremovable e.g. eMMC */
        bool    power_saving;   /* Try to sleep or power off when possible */
        bool    no_off;         /* power_saving and power is not to go off */
+       bool    no_off_init;    /* no power off when not in MMC sleep state */
        bool    vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
        int     gpio_cd;        /* or -EINVAL */
        int     gpio_wp;        /* or -EINVAL */
index a4ab1e364313693dcdef6292e3310a8afa9fe6be..c7fb22abc219db3756758275e0aa30c90fcdd5e6 100644 (file)
@@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val,
 void omap_mux_write_array(struct omap_mux_partition *partition,
                                 struct omap_board_mux *board_mux)
 {
+       if (!board_mux)
+               return;
+
        while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
                omap_mux_write(partition, board_mux->value,
                               board_mux->reg_offset);
@@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio(
 u16 omap_mux_get_gpio(int gpio)
 {
        struct omap_mux_partition *partition;
-       struct omap_mux *m;
+       struct omap_mux *m = NULL;
 
        list_for_each_entry(partition, &mux_partitions, node) {
                m = omap_mux_get_by_gpio(partition, gpio);
index 137f321c029f59a240fd885cfd7c8538c59bf9d8..2132308ad1e41fffc14856b48294ffd42e748c8b 100644 (file)
@@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
 
 /**
  * omap4_mux_init() - initialize mux system with board specific set
- * @board_mux:         Board specific mux table
+ * @board_subset:      Board specific mux table
+ * @board_wkup_subset: Board specific mux table for wakeup instance
  * @flags:             OMAP package type used for the board
  */
-int omap4_mux_init(struct omap_board_mux *board_mux, int flags);
+int omap4_mux_init(struct omap_board_mux *board_subset,
+       struct omap_board_mux *board_wkup_subset, int flags);
 
 /**
  * omap_mux_init - private mux init function, do not call
index 9a66445112ae8ca37a7ad8b1c136c04d7cebaa4f..f5a74daab2ff679ecfc7717d1b8dd3b1295d8606 100644 (file)
@@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
 #define omap4_wkup_cbl_cbs_ball  NULL
 #endif
 
-int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
+int __init omap4_mux_init(struct omap_board_mux *board_subset,
+       struct omap_board_mux *board_wkup_subset, int flags)
 {
        struct omap_ball *package_balls_core;
        struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
@@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
                            OMAP_MUX_GPIO_IN_MODE3,
                            OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
                            OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
-                           omap4_wkup_muxmodes, NULL, board_subset,
+                           omap4_wkup_muxmodes, NULL, board_wkup_subset,
                            package_balls_wkup);
 
        return ret;
index e03429453ce7aa8b20eee7fe0b1026a66fec8c5b..293fa6cd50e14192cf075e1b9f9937c307b7c2e4 100644 (file)
@@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
                        void *data)
 {
        struct omap_hwmod *temp_oh;
-       int ret;
+       int ret = 0;
 
        if (!fn)
                return -EINVAL;
index abc548a0c98dacae4bd6c5fc7d8eb615dde9b4c5..e1c69ffe0f69db181b9dd6245be9e72ffbaf63f0 100644 (file)
@@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
        &omap44xx_iva_seq1_hwmod,
 
        /* kbd class */
-/*     &omap44xx_kbd_hwmod, */
+       &omap44xx_kbd_hwmod,
 
        /* mailbox class */
        &omap44xx_mailbox_hwmod,
index f47813edd95143d938c554fd3dc19b71f492237a..58775e3c84762e59b9cbe3c82a5d4022638f15ae 100644 (file)
@@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev)
        /* Power down the phy */
        __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
 
-       if (!dev)
+       if (!dev) {
+               iounmap(ctrl_base);
                return 0;
+       }
 
        phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
        if (IS_ERR(phyclk)) {
index a5a83b358ddd89724de71eace26e11d8289c8e65..e01da45c053756f62ac08956620dc16dd09d3407 100644 (file)
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
-static int __init pm_dbg_init(void);
+static int pm_dbg_init(void);
 
 enum {
        DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
 
-static int __init pm_dbg_init(void)
+static int pm_dbg_init(void)
 {
        int i;
        struct dentry *d;
index 7fe74067d85fc7e2aeb1469878f28c8cd73b3456..094279aefe9c6fd5d6a86dd88fbbc44b6d55c07b 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/apm-emulation.h>
index 0d468e96e83ef399b40f094f8304a1735e84957f..81695353d8f497d52d848a3637ac2b133003e841 100644 (file)
@@ -10,7 +10,6 @@ obj-n                         :=
 obj-                           :=
 
 obj-$(CONFIG_CPU_S3C2410)      += s3c2410.o
-obj-$(CONFIG_CPU_S3C2410)      += irq.o
 obj-$(CONFIG_CPU_S3C2410_DMA)  += dma.o
 obj-$(CONFIG_CPU_S3C2410_DMA)  += dma.o
 obj-$(CONFIG_S3C2410_PM)       += pm.o sleep.o
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c
deleted file mode 100644 (file)
index 2854129..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* linux/arch/arm/mach-s3c2410/irq.c
- *
- * Copyright (c) 2006 Simtec Electronics
- *     Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/syscore_ops.h>
-
-#include <plat/cpu.h>
-#include <plat/pm.h>
-
-struct syscore_ops s3c24xx_irq_syscore_ops = {
-       .suspend        = s3c24xx_irq_suspend,
-       .resume         = s3c24xx_irq_resume,
-};
index 22046e2f53c2a17d1a064f7257696551ff325677..153af8b359ec00994994dc3abf9c052ddd39b3cb 100644 (file)
@@ -101,12 +101,14 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
        unsigned long tmp, tmp1;
        void __iomem *reg = NULL;
 
-       if (ch == DMC0)
+       if (ch == DMC0) {
                reg = (S5P_VA_DMC0 + 0x30);
-       else if (ch == DMC1)
+       } else if (ch == DMC1) {
                reg = (S5P_VA_DMC1 + 0x30);
-       else
+       } else {
                printk(KERN_ERR "Cannot find DMC port\n");
+               return;
+       }
 
        /* Find current DRAM frequency */
        tmp = s5pv210_dram_conf[ch].freq;
index c95258c274c11c0c3eea565ef0709531440c3cab..1e2aba23e0d6d56ea6ddcd696937344027493e8b 100644 (file)
@@ -382,10 +382,8 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
 }
 
 static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
-       .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
-       .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
-       .tmio_caps      = MMC_CAP_NONREMOVABLE,
+       .tmio_caps      = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
        .tmio_ocr_mask  = MMC_VDD_32_33 | MMC_VDD_33_34,
        .set_pwr        = ag5evm_sdhi1_set_pwr,
 };
index 08acb6ec81390c5a4eb6204dfda57b5a5e28d862..f6b687f61c28ebe4def01c748e0a576a53a362d9 100644 (file)
@@ -249,6 +249,29 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
 {
        return !gpio_get_value(GPIO_PORT41);
 }
+/* MERAM */
+static struct sh_mobile_meram_info meram_info = {
+       .addr_mode      = SH_MOBILE_MERAM_MODE1,
+};
+
+static struct resource meram_resources[] = {
+       [0] = {
+               .name   = "MERAM",
+               .start  = 0xe8000000,
+               .end    = 0xe81fffff,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device meram_device = {
+       .name           = "sh_mobile_meram",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(meram_resources),
+       .resource       = meram_resources,
+       .dev            = {
+               .platform_data = &meram_info,
+       },
+};
 
 /* SH_MMCIF */
 static struct resource sh_mmcif_resources[] = {
@@ -447,13 +470,29 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = {
 #endif
        },
 };
+static struct sh_mobile_meram_cfg lcd_meram_cfg = {
+       .icb[0] = {
+               .marker_icb     = 28,
+               .cache_icb      = 24,
+               .meram_offset   = 0x0,
+               .meram_size     = 0x40,
+       },
+       .icb[1] = {
+               .marker_icb     = 29,
+               .cache_icb      = 25,
+               .meram_offset   = 0x40,
+               .meram_size     = 0x40,
+       },
+};
 
 static struct sh_mobile_lcdc_info lcdc_info = {
+       .meram_dev = &meram_info,
        .ch[0] = {
                .chan = LCDC_CHAN_MAINLCD,
                .bpp = 16,
                .lcd_cfg = ap4evb_lcdc_modes,
                .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
+               .meram_cfg = &lcd_meram_cfg,
        }
 };
 
@@ -724,15 +763,31 @@ static struct platform_device fsi_device = {
 static struct platform_device fsi_ak4643_device = {
        .name           = "sh_fsi2_a_ak4643",
 };
+static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
+       .icb[0] = {
+               .marker_icb     = 30,
+               .cache_icb      = 26,
+               .meram_offset   = 0x80,
+               .meram_size     = 0x100,
+       },
+       .icb[1] = {
+               .marker_icb     = 31,
+               .cache_icb      = 27,
+               .meram_offset   = 0x180,
+               .meram_size     = 0x100,
+       },
+};
 
 static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
        .clock_source = LCDC_CLK_EXTERNAL,
+       .meram_dev = &meram_info,
        .ch[0] = {
                .chan = LCDC_CHAN_MAINLCD,
                .bpp = 16,
                .interface_type = RGB24,
                .clock_divider = 1,
                .flags = LCDC_FLAGS_DWPOL,
+               .meram_cfg = &hdmi_meram_cfg,
        }
 };
 
@@ -961,6 +1016,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
        &csi2_device,
        &ceu_device,
        &ap4evb_camera,
+       &meram_device,
 };
 
 static void __init hdmi_init_pm_clock(void)
index 448ddbe4333504d517a0ba5c70099c198fb2e9a9..7e1d375843211e52944720a562a5c580dbe20149 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
+#include <linux/pm_runtime.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>
  * ------+--------------------+--------------------+-------
  * IRQ0  | ICR1A.IRQ0SA=0010  | SDHI2 card detect  | Low
  * IRQ6  | ICR1A.IRQ6SA=0011  | Ether(LAN9220)     | High
- * IRQ7  | ICR1A.IRQ7SA=0010  | LCD Tuch Panel     | Low
+ * IRQ7  | ICR1A.IRQ7SA=0010  | LCD Touch Panel    | Low
  * IRQ8  | ICR2A.IRQ8SA=0010  | MMC/SD card detect | Low
  * IRQ9  | ICR2A.IRQ9SA=0010  | KEY(TCA6408)       | Low
  * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345)    | High
  * USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
  * But don't select both drivers in same time.
  * These uses same IRQ number for request_irq(), and aren't supporting
- * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE.
+ * IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE.
  *
  * Actually these are old/new version of USB driver.
- * This mean its register will be broken if it supports SHARD IRQ,
+ * This mean its register will be broken if it supports shared IRQ,
  */
 
 /*
@@ -314,6 +315,30 @@ static struct platform_device smc911x_device = {
        },
 };
 
+/* MERAM */
+static struct sh_mobile_meram_info mackerel_meram_info = {
+       .addr_mode      = SH_MOBILE_MERAM_MODE1,
+};
+
+static struct resource meram_resources[] = {
+       [0] = {
+               .name   = "MERAM",
+               .start  = 0xe8000000,
+               .end    = 0xe81fffff,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device meram_device = {
+       .name           = "sh_mobile_meram",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(meram_resources),
+       .resource       = meram_resources,
+       .dev            = {
+               .platform_data = &mackerel_meram_info,
+       },
+};
+
 /* LCDC */
 static struct fb_videomode mackerel_lcdc_modes[] = {
        {
@@ -342,7 +367,23 @@ static int mackerel_get_brightness(void *board_data)
        return gpio_get_value(GPIO_PORT31);
 }
 
+static struct sh_mobile_meram_cfg lcd_meram_cfg = {
+       .icb[0] = {
+               .marker_icb     = 28,
+               .cache_icb      = 24,
+               .meram_offset   = 0x0,
+               .meram_size     = 0x40,
+       },
+       .icb[1] = {
+               .marker_icb     = 29,
+               .cache_icb      = 25,
+               .meram_offset   = 0x40,
+               .meram_size     = 0x40,
+       },
+};
+
 static struct sh_mobile_lcdc_info lcdc_info = {
+       .meram_dev = &mackerel_meram_info,
        .clock_source = LCDC_CLK_BUS,
        .ch[0] = {
                .chan = LCDC_CHAN_MAINLCD,
@@ -362,6 +403,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
                        .name = "sh_mobile_lcdc_bl",
                        .max_brightness = 1,
                },
+               .meram_cfg = &lcd_meram_cfg,
        }
 };
 
@@ -388,8 +430,23 @@ static struct platform_device lcdc_device = {
        },
 };
 
+static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
+       .icb[0] = {
+               .marker_icb     = 30,
+               .cache_icb      = 26,
+               .meram_offset   = 0x80,
+               .meram_size     = 0x100,
+       },
+       .icb[1] = {
+               .marker_icb     = 31,
+               .cache_icb      = 27,
+               .meram_offset   = 0x180,
+               .meram_size     = 0x100,
+       },
+};
 /* HDMI */
 static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
+       .meram_dev = &mackerel_meram_info,
        .clock_source = LCDC_CLK_EXTERNAL,
        .ch[0] = {
                .chan = LCDC_CHAN_MAINLCD,
@@ -397,6 +454,7 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
                .interface_type = RGB24,
                .clock_divider = 1,
                .flags = LCDC_FLAGS_DWPOL,
+               .meram_cfg = &hdmi_meram_cfg,
        }
 };
 
@@ -504,7 +562,121 @@ out:
                clk_put(hdmi_ick);
 }
 
-/* USB1 (Host) */
+/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
+ *
+ * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
+ * but on this particular board IRQ7 is already used by
+ * the touch screen. This leaves us with software polling.
+ */
+#define USBHS0_POLL_INTERVAL (HZ * 5)
+
+struct usbhs_private {
+       unsigned int usbphyaddr;
+       unsigned int usbcrcaddr;
+       struct renesas_usbhs_platform_info info;
+       struct delayed_work work;
+       struct platform_device *pdev;
+};
+
+#define usbhs_get_priv(pdev)                           \
+       container_of(renesas_usbhs_get_info(pdev),      \
+                    struct usbhs_private, info)
+
+#define usbhs_is_connected(priv)                       \
+       (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
+
+static int usbhs_get_vbus(struct platform_device *pdev)
+{
+       return usbhs_is_connected(usbhs_get_priv(pdev));
+}
+
+static void usbhs_phy_reset(struct platform_device *pdev)
+{
+       struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+       /* init phy */
+       __raw_writew(0x8a0a, priv->usbcrcaddr);
+}
+
+static int usbhs0_get_id(struct platform_device *pdev)
+{
+       return USBHS_GADGET;
+}
+
+static void usbhs0_work_function(struct work_struct *work)
+{
+       struct usbhs_private *priv = container_of(work, struct usbhs_private,
+                                                 work.work);
+
+       renesas_usbhs_call_notify_hotplug(priv->pdev);
+       schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
+}
+
+static int usbhs0_hardware_init(struct platform_device *pdev)
+{
+       struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+       priv->pdev = pdev;
+       INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
+       schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
+       return 0;
+}
+
+static void usbhs0_hardware_exit(struct platform_device *pdev)
+{
+       struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+       cancel_delayed_work_sync(&priv->work);
+}
+
+static struct usbhs_private usbhs0_private = {
+       .usbcrcaddr     = 0xe605810c,           /* USBCR2 */
+       .info = {
+               .platform_callback = {
+                       .hardware_init  = usbhs0_hardware_init,
+                       .hardware_exit  = usbhs0_hardware_exit,
+                       .phy_reset      = usbhs_phy_reset,
+                       .get_id         = usbhs0_get_id,
+                       .get_vbus       = usbhs_get_vbus,
+               },
+               .driver_param = {
+                       .buswait_bwait  = 4,
+               },
+       },
+};
+
+static struct resource usbhs0_resources[] = {
+       [0] = {
+               .name   = "USBHS0",
+               .start  = 0xe6890000,
+               .end    = 0xe68900e6 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = evt2irq(0x1ca0) /* USB0_USB0I0 */,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usbhs0_device = {
+       .name   = "renesas_usbhs",
+       .id     = 0,
+       .dev = {
+               .platform_data          = &usbhs0_private.info,
+       },
+       .num_resources  = ARRAY_SIZE(usbhs0_resources),
+       .resource       = usbhs0_resources,
+};
+
+/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
+ *
+ * Use J30 to select between Host and Function. This setting
+ * cannot, however, be detected by software. Hotplug of USBHS1
+ * is provided via IRQ8.
+ */
+#define IRQ8 evt2irq(0x0300)
+
+/* USBHS1 USB Host support via r8a66597_hcd */
 static void usb1_host_port_power(int port, int power)
 {
        if (!power) /* only power-on is supported for now */
@@ -521,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = {
 
 static struct resource usb1_host_resources[] = {
        [0] = {
-               .name   = "USBHS",
-               .start  = 0xE68B0000,
-               .end    = 0xE68B00E6 - 1,
+               .name   = "USBHS1",
+               .start  = 0xe68b0000,
+               .end    = 0xe68b00e6 - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -544,37 +716,14 @@ static struct platform_device usb1_host_device = {
        .resource       = usb1_host_resources,
 };
 
-/* USB1 (Function) */
+/* USBHS1 USB Function support via renesas_usbhs */
+
 #define USB_PHY_MODE           (1 << 4)
 #define USB_PHY_INT_EN         ((1 << 3) | (1 << 2))
 #define USB_PHY_ON             (1 << 1)
 #define USB_PHY_OFF            (1 << 0)
 #define USB_PHY_INT_CLR                (USB_PHY_ON | USB_PHY_OFF)
 
-struct usbhs_private {
-       unsigned int irq;
-       unsigned int usbphyaddr;
-       unsigned int usbcrcaddr;
-       struct renesas_usbhs_platform_info info;
-};
-
-#define usbhs_get_priv(pdev)                           \
-       container_of(renesas_usbhs_get_info(pdev),      \
-                    struct usbhs_private, info)
-
-#define usbhs_is_connected(priv)                       \
-       (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
-
-static int usbhs1_get_id(struct platform_device *pdev)
-{
-       return USBHS_GADGET;
-}
-
-static int usbhs1_get_vbus(struct platform_device *pdev)
-{
-       return usbhs_is_connected(usbhs_get_priv(pdev));
-}
-
 static irqreturn_t usbhs1_interrupt(int irq, void *data)
 {
        struct platform_device *pdev = data;
@@ -596,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
        struct usbhs_private *priv = usbhs_get_priv(pdev);
        int ret;
 
-       irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
-
        /* clear interrupt status */
        __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
 
-       ret = request_irq(priv->irq, usbhs1_interrupt, 0,
+       ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
                          dev_name(&pdev->dev), pdev);
        if (ret) {
                dev_err(&pdev->dev, "request_irq err\n");
@@ -621,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
        /* clear interrupt status */
        __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
 
-       free_irq(priv->irq, pdev);
+       free_irq(IRQ8, pdev);
 }
 
-static void usbhs1_phy_reset(struct platform_device *pdev)
+static int usbhs1_get_id(struct platform_device *pdev)
 {
-       struct usbhs_private *priv = usbhs_get_priv(pdev);
-
-       /* init phy */
-       __raw_writew(0x8a0a, priv->usbcrcaddr);
+       return USBHS_GADGET;
 }
 
 static u32 usbhs1_pipe_cfg[] = {
@@ -652,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = {
 };
 
 static struct usbhs_private usbhs1_private = {
-       .irq            = evt2irq(0x0300),      /* IRQ8 */
-       .usbphyaddr     = 0xE60581E2,           /* USBPHY1INTAP */
-       .usbcrcaddr     = 0xE6058130,           /* USBCR4 */
+       .usbphyaddr     = 0xe60581e2,           /* USBPHY1INTAP */
+       .usbcrcaddr     = 0xe6058130,           /* USBCR4 */
        .info = {
                .platform_callback = {
                        .hardware_init  = usbhs1_hardware_init,
                        .hardware_exit  = usbhs1_hardware_exit,
-                       .phy_reset      = usbhs1_phy_reset,
                        .get_id         = usbhs1_get_id,
-                       .get_vbus       = usbhs1_get_vbus,
+                       .phy_reset      = usbhs_phy_reset,
+                       .get_vbus       = usbhs_get_vbus,
                },
                .driver_param = {
                        .buswait_bwait  = 4,
@@ -673,9 +816,9 @@ static struct usbhs_private usbhs1_private = {
 
 static struct resource usbhs1_resources[] = {
        [0] = {
-               .name   = "USBHS",
-               .start  = 0xE68B0000,
-               .end    = 0xE68B00E6 - 1,
+               .name   = "USBHS1",
+               .start  = 0xe68b0000,
+               .end    = 0xe68b00e6 - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -694,7 +837,6 @@ static struct platform_device usbhs1_device = {
        .resource       = usbhs1_resources,
 };
 
-
 /* LED */
 static struct gpio_led mackerel_leds[] = {
        {
@@ -856,6 +998,17 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
 }
 
 /* SDHI0 */
+static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
+{
+       struct device *dev = arg;
+       struct sh_mobile_sdhi_info *info = dev->platform_data;
+       struct tmio_mmc_data *pdata = info->pdata;
+
+       tmio_mmc_cd_wakeup(pdata);
+
+       return IRQ_HANDLED;
+}
+
 static struct sh_mobile_sdhi_info sdhi0_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI0_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI0_RX,
@@ -1134,6 +1287,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
        &nor_flash_device,
        &smc911x_device,
        &lcdc_device,
+       &usbhs0_device,
        &usb1_host_device,
        &usbhs1_device,
        &leds_device,
@@ -1150,6 +1304,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
        &mackerel_camera,
        &hdmi_lcdc_device,
        &hdmi_device,
+       &meram_device,
 };
 
 /* Keypad Initialization */
@@ -1231,6 +1386,7 @@ static void __init mackerel_map_io(void)
 
 #define GPIO_PORT9CR   0xE6051009
 #define GPIO_PORT10CR  0xE605100A
+#define GPIO_PORT167CR 0xE60520A7
 #define GPIO_PORT168CR 0xE60520A8
 #define SRCR4          0xe61580bc
 #define USCCR1         0xE6058144
@@ -1238,6 +1394,7 @@ static void __init mackerel_init(void)
 {
        u32 srcr4;
        struct clk *clk;
+       int ret;
 
        sh7372_pinmux_init();
 
@@ -1283,17 +1440,17 @@ static void __init mackerel_init(void)
        gpio_request(GPIO_PORT151, NULL); /* LCDDON */
        gpio_direction_output(GPIO_PORT151, 1);
 
-       /* USB enable */
-       gpio_request(GPIO_FN_VBUS0_1,    NULL);
-       gpio_request(GPIO_FN_IDIN_1_18,  NULL);
-       gpio_request(GPIO_FN_PWEN_1_115, NULL);
-       gpio_request(GPIO_FN_OVCN_1_114, NULL);
-       gpio_request(GPIO_FN_EXTLP_1,    NULL);
-       gpio_request(GPIO_FN_OVCN2_1,    NULL);
-       gpio_pull_down(GPIO_PORT168CR);
+       /* USBHS0 */
+       gpio_request(GPIO_FN_VBUS0_0, NULL);
+       gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
 
-       /* setup USB phy */
-       __raw_writew(0x8a0a, 0xE6058130);       /* USBCR4 */
+       /* USBHS1 */
+       gpio_request(GPIO_FN_VBUS0_1, NULL);
+       gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
+       gpio_request(GPIO_FN_IDIN_1_113, NULL);
+
+       /* USB phy tweak to make the r8a66597_hcd host driver work */
+       __raw_writew(0x8a0a, 0xe6058130);       /* USBCR4 */
 
        /* enable FSI2 port A (ak4643) */
        gpio_request(GPIO_FN_FSIAIBT,   NULL);
@@ -1343,6 +1500,13 @@ static void __init mackerel_init(void)
        gpio_request(GPIO_FN_SDHID0_1, NULL);
        gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+       ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
+                         IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
+       if (!ret)
+               sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
+       else
+               pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
        /* enable SDHI1 */
        gpio_request(GPIO_FN_SDHICMD1, NULL);
index d17eb66f4ac20980fb7ec441edf15e80b9f142b3..c0800d83971e62c591993cce8ad77c87a5d3b6dd 100644 (file)
@@ -509,6 +509,7 @@ enum { MSTP001,
        MSTP118, MSTP117, MSTP116, MSTP113,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
+       MSTP218, MSTP217, MSTP216,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
        MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
@@ -534,6 +535,9 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
        [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
        [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
+       [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
+       [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
+       [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
        [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
        [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
        [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -626,6 +630,9 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
+       CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
+       CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
+       CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
        CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
        CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
        CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
index 5d0e1503ece66fb9385f3ffc80abb88e60d2ed27..a911a60e7719a1e0fa040f5798b0e7174b060fa1 100644 (file)
@@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
+{
+       return 0; /* always allow wakeup */
+}
+
 void __init sh73a0_init_irq(void)
 {
        void __iomem *gic_dist_base = __io(0xf0001000);
@@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void)
        void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
 
        gic_init(0, 29, gic_dist_base, gic_cpu_base);
+       gic_arch_extn.irq_set_wake = sh73a0_set_wake;
 
        register_intc_controller(&intcs_desc);
 
index 2c10190dbb554eb7a9063de3f31408dfab1d5e93..e546017f15dea1202df2be20fa6303b69d70f1f4 100644 (file)
@@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xc00), evt2irq(0xc00),
                            evt2irq(0xc00), evt2irq(0xc00) },
 };
@@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xc20), evt2irq(0xc20),
                            evt2irq(0xc20), evt2irq(0xc20) },
 };
@@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xc40), evt2irq(0xc40),
                            evt2irq(0xc40), evt2irq(0xc40) },
 };
@@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xc60), evt2irq(0xc60),
                            evt2irq(0xc60), evt2irq(0xc60) },
 };
@@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xd20), evt2irq(0xd20),
                            evt2irq(0xd20), evt2irq(0xd20) },
 };
@@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFA,
        .irqs           = { evt2irq(0xd40), evt2irq(0xd40),
                            evt2irq(0xd40), evt2irq(0xd40) },
 };
@@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = {
        .flags          = UPF_BOOT_AUTOCONF,
        .scscr          = SCSCR_RE | SCSCR_TE,
        .scbrr_algo_id  = SCBRR_ALGO_4,
-       .type           = PORT_SCIF,
+       .type           = PORT_SCIFB,
        .irqs           = { evt2irq(0xd60), evt2irq(0xd60),
                            evt2irq(0xd60), evt2irq(0xd60) },
 };
index c84442cabe077b3fad50b110641cbbd95f96afd8..5ad8b2f94f8dbbbaa374c2c6ab307c307591ce72 100644 (file)
@@ -24,6 +24,8 @@
 
 #include <mach/irqs.h>
 
+#include "board-harmony.h"
+
 #define PMC_CTRL               0x0
 #define PMC_CTRL_INTR_LOW      (1 << 17)
 
@@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = {
        .irq_base       = TEGRA_NR_IRQS,
        .num_subdevs    = ARRAY_SIZE(tps_devs),
        .subdevs        = tps_devs,
-       .gpio_base      = TEGRA_NR_GPIOS,
+       .gpio_base      = HARMONY_GPIO_TPS6586X(0),
 };
 
 static struct i2c_board_info __initdata harmony_regulators[] = {
index 1e57b071f52de9c6d37178bca02f0d6a563db1e8..d85142edaf6bc0eebc7d66b385e457d91fc5a100 100644 (file)
@@ -17,7 +17,8 @@
 #ifndef _MACH_TEGRA_BOARD_HARMONY_H
 #define _MACH_TEGRA_BOARD_HARMONY_H
 
-#define HARMONY_GPIO_WM8903(_x_)       (TEGRA_NR_GPIOS + (_x_))
+#define HARMONY_GPIO_TPS6586X(_x_)     (TEGRA_NR_GPIOS + (_x_))
+#define HARMONY_GPIO_WM8903(_x_)       (HARMONY_GPIO_TPS6586X(4) + (_x_))
 
 #define TEGRA_GPIO_SD2_CD              TEGRA_GPIO_PI5
 #define TEGRA_GPIO_SD2_WP              TEGRA_GPIO_PH1
index c34f3ea3017c4db1b4da624b12c1f800e24a91b0..4f50ca8f901e2bebd5de51defd425ba9e83f9ff4 100644 (file)
@@ -31,7 +31,7 @@ struct clk {
        bool reset;
        __u16 clk_val;
        __s8 usecount;
-       __u32 res_reg;
+       void __iomem * res_reg;
        __u16 res_mask;
 
        bool hw_ctrld;
index 8b85df4c8d8fcce5530535ffb2d91e15f25b636d..035fdc9dbdb03ebb50e88873a67232707d4fbc50 100644 (file)
  * the defines are used for setting up the I/O memory mapping.
  */
 
+#ifdef __ASSEMBLER__
+#define IOMEM(a) (a)
+#else
+#define IOMEM(a) (void __iomem *) a
+#endif
+
 /* NAND Flash CS0 */
 #define U300_NAND_CS0_PHYS_BASE                0x80000000
 
 #define U300_SEMI_CONFIG_BASE          0x30000000
 #endif
 
-/*
- * All the following peripherals are specified at their PHYSICAL address,
- * so if you need to access them (in the kernel), you MUST use the macros
- * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST()
- * etc.
- */
-
 /*
  * AHB peripherals
  */
 
 /* Vectored Interrupt Controller 0, servicing 32 interrupts */
 #define U300_INTCON0_BASE              (U300_AHB_PER_PHYS_BASE+0x1000)
-#define U300_INTCON0_VBASE             (U300_AHB_PER_VIRT_BASE+0x1000)
+#define U300_INTCON0_VBASE             IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
 
 /* Vectored Interrupt Controller 1, servicing 32 interrupts */
 #define U300_INTCON1_BASE              (U300_AHB_PER_PHYS_BASE+0x2000)
-#define U300_INTCON1_VBASE             (U300_AHB_PER_VIRT_BASE+0x2000)
+#define U300_INTCON1_VBASE             IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
 
 /* Memory Stick Pro (MSPRO) controller */
 #define U300_MSPRO_BASE                        (U300_AHB_PER_PHYS_BASE+0x3000)
 
 /* SYSCON */
 #define U300_SYSCON_BASE               (U300_SLOW_PER_PHYS_BASE+0x1000)
-#define U300_SYSCON_VBASE              (U300_SLOW_PER_VIRT_BASE+0x1000)
+#define U300_SYSCON_VBASE              IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
 
 /* Watchdog */
 #define U300_WDOG_BASE                 (U300_SLOW_PER_PHYS_BASE+0x2000)
 
 /* APP side special timer */
 #define U300_TIMER_APP_BASE            (U300_SLOW_PER_PHYS_BASE+0x4000)
-#define U300_TIMER_APP_VBASE           (U300_SLOW_PER_VIRT_BASE+0x4000)
+#define U300_TIMER_APP_VBASE           IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
 
 /* Keypad */
 #define U300_KEYPAD_BASE               (U300_SLOW_PER_PHYS_BASE+0x5000)
  * Virtual accessor macros for static devices
  */
 
-
 #endif
index 891cf44591e073ea4de84968d33f598595a8263c..18d7fa0603c230259ff96d3f9dc910b4a53a3b04 100644 (file)
@@ -411,8 +411,7 @@ static void __init u300_timer_init(void)
        /* Use general purpose timer 2 as clock source */
        if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
                        "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
-               printk(KERN_ERR "timer: failed to initialize clock "
-                      "source %s\n", clocksource_u300_1mhz.name);
+               pr_err("timer: failed to initialize U300 clock source\n");
 
        clockevents_calc_mult_shift(&clockevent_u300_1mhz,
                                    rate, APPTIMER_MIN_RANGE);
index c3c417656bd96ecd531b4b7ebe9ca0c26fac9de6..4598b06c8c554383a711b205f103ebfae29b87e7 100644 (file)
@@ -159,6 +159,9 @@ static void __init db8500_add_gpios(void)
                /* No custom data yet */
        };
 
+       if (cpu_is_u8500v2())
+               pdata.supports_sleepmode = true;
+
        dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
                         IRQ_DB8500_GPIO0, &pdata);
 }
index 285edcd2da2aa9a9d6186a6c17cf053918b0b66e..9e6b93b1a04342e7883aa241ddecb02856818a0e 100644 (file)
@@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = {
        },
 };
 
-static void __init v2m_init_early(void)
-{
-       ct_desc->init_early();
-       versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
-}
-
 static void __init v2m_timer_init(void)
 {
        u32 scctrl;
@@ -365,6 +359,13 @@ static struct clk_lookup v2m_lookups[] = {
        },
 };
 
+static void __init v2m_init_early(void)
+{
+       ct_desc->init_early();
+       clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
+       versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
+}
+
 static void v2m_power_off(void)
 {
        if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
@@ -418,8 +419,6 @@ static void __init v2m_init(void)
 {
        int i;
 
-       clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
-
        platform_device_register(&v2m_pcie_i2c_device);
        platform_device_register(&v2m_ddc_i2c_device);
        platform_device_register(&v2m_flash_device);
index 8bfae964b133987e061c430a0426f951b07172fa..b0ee9ba3cfab41a52853eca727466abff88ddf27 100644 (file)
@@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.
+ * to run in.  We reserve version 0 for initial tasks so we will
+ * always allocate an ASID. The ASID 0 is reserved for the TTBR
+ * register changing sequence.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-       u32 ttb;
-       /* Copy TTBR1 into TTBR0 */
-       asm volatile("mrc       p15, 0, %0, c2, c0, 1\n"
-                    "mcr       p15, 0, %0, c2, c0, 0"
-                    : "=r" (ttb));
+       /* set the reserved ASID before flushing the TLB */
+       asm("mcr        p15, 0, %0, c13, c0, 1\n" : : "r" (0));
        isb();
        local_flush_tlb_all();
        if (icache_is_vivt_asid_tagged()) {
@@ -94,7 +93,7 @@ static void reset_context(void *info)
                return;
 
        smp_rmb();
-       asid = cpu_last_asid + cpu;
+       asid = cpu_last_asid + cpu + 1;
 
        flush_context();
        set_mm_context(mm, asid);
@@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm)
         * to start a new version and flush the TLB.
         */
        if (unlikely((asid & ~ASID_MASK) == 0)) {
-               asid = cpu_last_asid + smp_processor_id();
+               asid = cpu_last_asid + smp_processor_id() + 1;
                flush_context();
 #ifdef CONFIG_SMP
                smp_wmb();
                smp_call_function(reset_context, NULL, 1);
 #endif
-               cpu_last_asid += NR_CPUS - 1;
+               cpu_last_asid += NR_CPUS;
        }
 
        set_mm_context(mm, asid);
index 2c2cce9cd8c8399e02cf841298649daf72d06330..c19571c40a21ca62902d65cd01dcb8db9ea4f178 100644 (file)
@@ -330,6 +330,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
        memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+       if (phys_initrd_size &&
+           !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
+               pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
+                      phys_initrd_start, phys_initrd_size);
+               phys_initrd_start = phys_initrd_size = 0;
+       }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
@@ -635,7 +641,8 @@ void __init mem_init(void)
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
-                       "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
+                       "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
+                       "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
 
                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
@@ -657,7 +664,8 @@ void __init mem_init(void)
 
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_text, _etext),
-                       MLK_ROUNDUP(_sdata, _edata));
+                       MLK_ROUNDUP(_sdata, _edata),
+                       MLK_ROUNDUP(__bss_start, __bss_stop));
 
 #undef MLK
 #undef MLM
index e4c165ca669680eb6d74ce8e3d5822de50bbe44f..537ffcb0646d5e0c77ef8a336f4b39b07937beab 100644 (file)
@@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
                .long   0
                .long   0
                .long   v4_cache_fns
-               .size   __arm7tdmi_proc_info, . - __arm7dmi_proc_info
+               .size   __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
 
                .type   __triscenda7_proc_info, #object
 __triscenda7_proc_info:
index 7b7ebd4d096d9cb3939bed7a059be8514486d463..546b54da10059752195476dc6bd9fc0b1b243775 100644 (file)
@@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
                .long   0
                .long   0
                .long   v4_cache_fns
-               .size   __arm9tdmi_proc_info, . - __arm9dmi_proc_info
+               .size   __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
 
                .type   __p2001_proc_info, #object
 __p2001_proc_info:
index b3b566ec83d397c1074c1dcbee0d5e33542e7d9c..3c3867850a3011d1e163d347a25ffbabc4aa4dc7 100644 (file)
@@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
        mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
 #endif
-       mrc     p15, 0, r2, c2, c0, 1           @ load TTB 1
-       mcr     p15, 0, r2, c2, c0, 0           @ into TTB 0
+#ifdef CONFIG_ARM_ERRATA_754322
+       dsb
+#endif
+       mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
+       isb
+1:     mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
        isb
 #ifdef CONFIG_ARM_ERRATA_754322
        dsb
 #endif
        mcr     p15, 0, r1, c13, c0, 1          @ set context ID
        isb
-       mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
-       isb
 #endif
        mov     pc, lr
 ENDPROC(cpu_v7_switch_mm)
index 3538b85ede910a4bc32c8e85b604789809cef21d..b130f60ca6b73e9b7b8435dcc52633bb1761078c 100644 (file)
@@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = {
 #endif
 
 #ifdef CONFIG_SOC_IMX51
-static struct sdma_script_start_addrs addr_imx51_to1 = {
+static struct sdma_script_start_addrs addr_imx51 = {
        .ap_2_ap_addr = 642,
        .uart_2_mcu_addr = 817,
        .mcu_2_app_addr = 747,
@@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void)
 
 #if defined(CONFIG_SOC_IMX51)
        if (cpu_is_mx51()) {
-               imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1;
+               int to_version = mx51_revision() >> 4;
+               imx51_imx_sdma_data.pdata.to_version = to_version;
+               imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51;
                ret = imx_add_imx_sdma(&imx51_imx_sdma_data);
        } else
 #endif
index ea19a5b2f22754814d99732686c5982c02eef527..d5d7e651269c3b434c5f500c5a491b9f7e3437c1 100644 (file)
@@ -90,6 +90,7 @@ struct nmk_gpio_platform_data {
        int num_gpio;
        u32 (*get_secondary_status)(unsigned int bank);
        void (*set_ioforce)(bool enable);
+       bool supports_sleepmode;
 };
 
 #endif /* __ASM_PLAT_GPIO_H */
index 3083195123ea2c1f9c46ea840d56a83ceda838b0..0d88499b79e903b1f7cf9ef4d2afe5c31d61c086 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/mtd/map.h>
 
+struct platform_device;
 extern void omap1_set_vpp(struct platform_device *pdev, int enable);
 
 #endif
index 32a2f6c4d39e3b37437f9e9c60b792a84297ed6e..e992b9655fbc4fb873d489846ce82e8644ea5ea6 100644 (file)
@@ -29,9 +29,6 @@ struct iovm_struct {
  * lower 16 bit is used for h/w and upper 16 bit is for s/w.
  */
 #define IOVMF_SW_SHIFT         16
-#define IOVMF_HW_SIZE          (1 << IOVMF_SW_SHIFT)
-#define IOVMF_HW_MASK          (IOVMF_HW_SIZE - 1)
-#define IOVMF_SW_MASK          (~IOVMF_HW_MASK)UL
 
 /*
  * iovma: h/w flags derived from cam and ram attribute
index f38fef9f1310ca56fdf73fb3a3816ec674ba06a9..c7b874186c27017d58a38bc4209e1c225752d3f5 100644 (file)
@@ -101,6 +101,9 @@ struct omap_mmc_platform_data {
                /* If using power_saving and the MMC power is not to go off */
                unsigned no_off:1;
 
+               /* eMMC does not handle power off when not in sleep state */
+               unsigned no_regulator_off_init:1;
+
                /* Regulator off remapped to sleep */
                unsigned vcc_aux_disable_is_sleep:1;
 
index 51ef43e8def6194e5d7bacfc361f093c3f89c79e..83a37c54342f414573d942c2c28f5bb9459b4ec7 100644 (file)
@@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
                        return PTR_ERR(va);
        }
 
-       flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;
 
@@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
        if (!va)
                return -ENOMEM;
 
-       flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
 
@@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
        if (!va)
                return -ENOMEM;
 
-       flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;
 
@@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
                return -ENOMEM;
        pa = virt_to_phys(va);
 
-       flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
 
index a37b8eb65b76a6c2b13a019545852b3c9b48a7cc..49fc0df0c21f58be364d6da00e14d73eb8c32344 100644 (file)
@@ -84,6 +84,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/pm_runtime.h>
 
 #include <plat/omap_device.h>
 #include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
 static int _od_runtime_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
+       int ret;
+
+       ret = pm_generic_runtime_suspend(dev);
+
+       if (!ret)
+               omap_device_idle(pdev);
+
+       return ret;
+}
 
-       return omap_device_idle(pdev);
+static int _od_runtime_idle(struct device *dev)
+{
+       return pm_generic_runtime_idle(dev);
 }
 
 static int _od_runtime_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       return omap_device_enable(pdev);
+       omap_device_enable(pdev);
+
+       return pm_generic_runtime_resume(dev);
 }
 
 static struct dev_power_domain omap_device_power_domain = {
        .ops = {
                .runtime_suspend = _od_runtime_suspend,
+               .runtime_idle = _od_runtime_idle,
                .runtime_resume = _od_runtime_resume,
                USE_PLATFORM_PM_SLEEP_OPS
        }
index a3f50b34a90d3ffed85c0fcd4bc6baad080df7cd..6af3d0b1f8d058e8387697b3c560bce32f7e31ef 100644 (file)
@@ -166,7 +166,7 @@ static void __init omap_detect_sram(void)
                else if (cpu_is_omap1611())
                        omap_sram_size = SZ_256K;
                else {
-                       printk(KERN_ERR "Could not detect SRAM size\n");
+                       pr_err("Could not detect SRAM size\n");
                        omap_sram_size = 0x4000;
                }
        }
@@ -221,10 +221,10 @@ static void __init omap_map_sram(void)
        omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
        iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
-       printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
-       __pfn_to_phys(omap_sram_io_desc[0].pfn),
-       omap_sram_io_desc[0].virtual,
-              omap_sram_io_desc[0].length);
+       pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
+               (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
+               omap_sram_io_desc[0].virtual,
+               omap_sram_io_desc[0].length);
 
        /*
         * Normally devicemaps_init() would flush caches and tlb after
@@ -252,7 +252,7 @@ static void __init omap_map_sram(void)
 void *omap_sram_push_address(unsigned long size)
 {
        if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
-               printk(KERN_ERR "Not enough space in SRAM\n");
+               pr_err("Not enough space in SRAM\n");
                return NULL;
        }
 
index c10d10c56e2e71a632877a9576efe62a1a8a717e..2abf9660bc6cc6eaa9879b82281bf79d3c83225d 100644 (file)
@@ -1199,7 +1199,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
 
 #ifdef CONFIG_PM
 
-static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp)
+static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
 {
        printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
 
index 9aee7e1668b1e464a908af6243ea62f6b5f9e76b..fc8c5f89954d453c29fcdd45d3a385af63ca0a41 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
@@ -668,3 +669,8 @@ void __init s3c24xx_init_irq(void)
 
        irqdbf("s3c2410: registered interrupt handlers\n");
 }
+
+struct syscore_ops s3c24xx_irq_syscore_ops = {
+       .suspend        = s3c24xx_irq_suspend,
+       .resume         = s3c24xx_irq_resume,
+};
index 6db926202caa6adcb193b899e21eb78584c28ff0..20336c8f247919f359bb22107cb8eadce842e55c 100644 (file)
@@ -15,8 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/onenand.h>
 
 #include <mach/irqs.h>
 #include <mach/map.h>
@@ -45,13 +43,3 @@ struct platform_device s5p_device_onenand = {
        .num_resources  = ARRAY_SIZE(s5p_onenand_resources),
        .resource       = s5p_onenand_resources,
 };
-
-void s5p_onenand_set_platdata(struct onenand_platform_data *pdata)
-{
-       struct onenand_platform_data *pd;
-
-       pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
-       if (!pd)
-               printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-       s5p_device_onenand.dev.platform_data = pd;
-}
index a6c3d327ce72d1306a8b9308501f60e7c8879efb..d973d39666a3f9e83adc79964810c6a2d8b85da3 100644 (file)
@@ -39,7 +39,7 @@
 #define S5P_VA_TWD             S5P_VA_COREPERI(0x600)
 #define S5P_VA_GIC_DIST                S5P_VA_COREPERI(0x1000)
 
-#define S5P_VA_USB_HSPHY       S3C_ADDR(0x02900000)
+#define S3C_VA_USB_HSPHY       S3C_ADDR(0x02900000)
 
 #define VA_VIC(x)              (S3C_VA_IRQ + ((x) * 0x10000))
 #define VA_VIC0                        VA_VIC(0)
index 45ec73287d8c89aeb0d0cde81d3ed95661d798ad..f54ae71f0cd2d461d296c4a5b9053ece80765a68 100644 (file)
@@ -13,8 +13,6 @@
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/onenand.h>
 
 #include <mach/irqs.h>
 #include <mach/map.h>
@@ -43,13 +41,3 @@ struct platform_device s3c_device_onenand = {
        .num_resources  = ARRAY_SIZE(s3c_onenand_resources),
        .resource       = s3c_onenand_resources,
 };
-
-void s3c_onenand_set_platdata(struct onenand_platform_data *pdata)
-{
-       struct onenand_platform_data *pd;
-
-       pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
-       if (!pd)
-               printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-       s3c_device_onenand.dev.platform_data = pd;
-}
index b61b8ee7cc52a3735c6507c874d3ba4ca5550dc6..4af108ff41121da579b41e0717802daf6c56285b 100644 (file)
@@ -75,10 +75,8 @@ extern struct platform_device s5pc100_device_spi1;
 extern struct platform_device s5pc100_device_spi2;
 extern struct platform_device s5pv210_device_spi0;
 extern struct platform_device s5pv210_device_spi1;
-extern struct platform_device s5p6440_device_spi0;
-extern struct platform_device s5p6440_device_spi1;
-extern struct platform_device s5p6450_device_spi0;
-extern struct platform_device s5p6450_device_spi1;
+extern struct platform_device s5p64x0_device_spi0;
+extern struct platform_device s5p64x0_device_spi1;
 
 extern struct platform_device s3c_device_hwmon;
 
index 6f9ca56de1f6b19ee52cca2b98e8a07411700109..a06bfccc2840e5204c5519203a6c343837ebac3c 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 7eece0af34c92ee46992b0a4d315110a2d254620..d8f1fe80d210daea5b764c50b0977a09d58c6a9f 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 387eb9d6e423321297fa1959741a9ff93812777c..d4c5b19ec950a2eddc4b6399e8d0286626aeb0c9 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 19f6ceeeff7b4710d5ccf916b231e0b469f9478f..77ca4f905d2cf9da872e12cb1efb4536a728a8f4 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_SLUB_DEBUG is not set
@@ -109,7 +110,7 @@ CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_AT32AP700X=m
 CONFIG_DMADEVICES=y
index f0fe237133a93f1a2d0984ab10e57d403f83cd71..6e0dca4d3131ff920a7580408ab48b45cd92b916 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index e4a7c1dc8380f4aefbd17b055ef66b4a42acd4cb..7f2a344a5fa8334594929bcae48ec4fee4b5242c 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 6f37f70c2c37a83d4126162b4910705a1693dd10..085eeba88f67e5b94efefe4284c57b69ca1c60c7 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 4fb01f5ab42f1db7f20e653564946ca1443dc03f..d1a887e6405595010c7181c6a928c5fc35a3cb7b 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 9faaf9b900f242358032ea65279d61575ffe6ec7..956f2819ad45495a96c9244a53781028b48c4d91 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 3d2a5d85f970f46985096290085a1fe19e1006e1..40c69f38c61a2ab2b9a409dc1304cf495d0606b5 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 1ed8f22d4fe2cd8ff5f8b9f3bfc04805fd19b3d6..511eb8af356dab0a3681ee274aade2a988c89096 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index aeadc955db323b9da70dbb0b8b675243bcdcfee6..19973b06170c94a5518619a4fb07869c08c1727d 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 1692beeb7ed3beeb321a1de732448dc0f886b12e..6f45681196d135a1290c2a5d7ae8be2b7b180d99 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 8b670a6530bf163304e48fe383e66bfc03123e15..3befab966827bacb4339247637e27d39333a7b1b 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 CONFIG_MODULES=y
index 5a51f2e7ffb9f9390294145d0332b936187ee536..1bee51f2215475e5f4bdbbaf00b49e84daf6719a 100644 (file)
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
index 49a88f5a9d2feda1c7b033cfd19c71453c6dd6bf..108502bc67706786d3125dff42be43f6a1d9b7d2 100644 (file)
@@ -131,7 +131,6 @@ struct thread_struct {
  */
 #define start_thread(regs, new_pc, new_sp)      \
        do {                                     \
-               set_fs(USER_DS);                 \
                memset(regs, 0, sizeof(*regs));  \
                regs->sr = MODE_USER;            \
                regs->pc = new_pc & ~1;          \
index aa677e2a3823d89272ed42a367400be93598c2fa..7fbf0dcb9afe5f88e9e307f3530cdbd875cf20f1 100644 (file)
@@ -1043,8 +1043,9 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
                data->regs = (void __iomem *)pdev->resource[0].start;
        }
 
+       pdev->id = line;
        pdata = pdev->dev.platform_data;
-       pdata->num = portnr;
+       pdata->num = line;
        at32_usarts[line] = pdev;
 }
 
index 9c96a130f3a858b02b33a3d66f1347623a2ba1ea..8181293115e427b1087c95a3b56f041437fa1ced 100644 (file)
 #define cpu_is_at91sam9263()   (0)
 #define cpu_is_at91sam9rl()    (0)
 #define cpu_is_at91cap9()      (0)
+#define cpu_is_at91cap9_revB() (0)
+#define cpu_is_at91cap9_revC() (0)
 #define cpu_is_at91sam9g10()   (0)
+#define cpu_is_at91sam9g20()   (0)
 #define cpu_is_at91sam9g45()   (0)
 #define cpu_is_at91sam9g45es() (0)
+#define cpu_is_at91sam9m10()   (0)
+#define cpu_is_at91sam9g46()   (0)
+#define cpu_is_at91sam9m11()   (0)
+#define cpu_is_at91sam9x5()    (0)
+#define cpu_is_at91sam9g15()   (0)
+#define cpu_is_at91sam9g35()   (0)
+#define cpu_is_at91sam9x35()   (0)
+#define cpu_is_at91sam9g25()   (0)
+#define cpu_is_at91sam9x25()   (0)
 
 #endif /* __ASM_ARCH_CPU_H */
index 3e3646186c9ffa686aa01ff86821fe1cc7639ebe..c9ac2f8e8f648af54f08407c7f31fb85a4807d06 100644 (file)
@@ -167,14 +167,12 @@ static int intc_suspend(void)
        return 0;
 }
 
-static int intc_resume(void)
+static void intc_resume(void)
 {
        int i;
 
        for (i = 0; i < 64; i++)
                intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
-
-       return 0;
 }
 #else
 #define intc_suspend   NULL
index 31d954216c054ec37748ffb942d6d3f463d8ba8b..9f1d08401fcaacdf6d53f661da44e2cee641b66e 100644 (file)
@@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m
 CONFIG_USB_G_PRINTER=m
 CONFIG_MMC=m
 CONFIG_SDH_BFIN=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_BFIN=m
 CONFIG_EXT2_FS=m
 # CONFIG_DNOTIFY is not set
index f3931d50b4a715a50878363a43bd14e9c63b9dd1..2c07dddac9956bd24da0603910c9a7d35957f42b 100644 (file)
@@ -25,7 +25,7 @@
 
 ENTRY(_strncpy)
        CC = R2 == 0;
-       if CC JUMP 4f;
+       if CC JUMP 6f;
 
        P2 = R2 ;       /* size */
        P0 = R0 ;       /* dst*/
index fc98f9b9d4d2ce417f29b547cad1884e9f7964df..b004dc1b1710bc3f8bfceddf7695266249b5bbdb 100644 (file)
@@ -14,6 +14,33 @@ config GENERIC_CLOCKEVENTS
        bool
        default n
 
+config M68000
+       bool
+       help
+         The Freescale (was Motorola) 68000 CPU is the first generation of
+         the well known M68K family of processors. As well as being
+         available as a stand-alone CPU, the core was also used in many
+         System-On-Chip devices (eg 68328, 68302, etc). It does not contain
+         a paging MMU.
+
+config MCPU32
+       bool
+       help
+         The Freescale (was then Motorola) CPU32 is a CPU core that is
+         based on the 68020 processor. For the most part it is used in
+         System-On-Chip parts, and does not contain a paging MMU.
+
+config COLDFIRE
+       bool
+       select GENERIC_GPIO
+       select ARCH_REQUIRE_GPIOLIB
+       help
+         The Freescale ColdFire family of processors is a modern derivative
+         of the 68000 processor family. They are mainly targeted at embedded
+         applications, and are all System-On-Chip (SOC) devices, as opposed
+         to stand alone CPUs. They implement a subset of the original 68000
+         processor instruction set.
+
 config COLDFIRE_SW_A7
        bool
        default n
@@ -36,26 +63,31 @@ choice
 
 config M68328
        bool "MC68328"
+       select M68000
        help
          Motorola 68328 processor support.
 
 config M68EZ328
        bool "MC68EZ328"
+       select M68000
        help
          Motorola 68EX328 processor support.
 
 config M68VZ328
        bool "MC68VZ328"
+       select M68000
        help
          Motorola 68VZ328 processor support.
 
 config M68360
        bool "MC68360"
+       select MCPU32
        help
          Motorola 68360 processor support.
 
 config M5206
        bool "MCF5206"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -63,6 +95,7 @@ config M5206
 
 config M5206e
        bool "MCF5206e"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -70,6 +103,7 @@ config M5206e
 
 config M520x
        bool "MCF520x"
+       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        help
@@ -77,6 +111,7 @@ config M520x
 
 config M523x
        bool "MCF523x"
+       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -85,6 +120,7 @@ config M523x
 
 config M5249
        bool "MCF5249"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -92,6 +128,7 @@ config M5249
 
 config M5271
        bool "MCF5271"
+       select COLDFIRE
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
        help
@@ -99,6 +136,7 @@ config M5271
 
 config M5272
        bool "MCF5272"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -106,6 +144,7 @@ config M5272
 
 config M5275
        bool "MCF5275"
+       select COLDFIRE
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
        help
@@ -113,6 +152,7 @@ config M5275
 
 config M528x
        bool "MCF528x"
+       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -121,6 +161,7 @@ config M528x
 
 config M5307
        bool "MCF5307"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_CACHE_CB
        select HAVE_MBAR
@@ -129,12 +170,14 @@ config M5307
 
 config M532x
        bool "MCF532x"
+       select COLDFIRE
        select HAVE_CACHE_CB
        help
          Freescale (Motorola) ColdFire 532x processor support.
 
 config M5407
        bool "MCF5407"
+       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_CACHE_CB
        select HAVE_MBAR
@@ -143,6 +186,7 @@ config M5407
 
 config M547x
        bool "MCF547x"
+       select COLDFIRE
        select HAVE_CACHE_CB
        select HAVE_MBAR
        help
@@ -150,6 +194,7 @@ config M547x
 
 config M548x
        bool "MCF548x"
+       select COLDFIRE
        select HAVE_CACHE_CB
        select HAVE_MBAR
        help
@@ -168,13 +213,6 @@ config M54xx
        depends on (M548x || M547x)
        default y
 
-config COLDFIRE
-       bool
-       depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx)
-       select GENERIC_GPIO
-       select ARCH_REQUIRE_GPIOLIB
-       default y
-
 config CLOCK_SET
        bool "Enable setting the CPU clock frequency"
        default n
index 33f82769547c003e85a5a3084fc09441f38c6e90..1b7a14d1a00070af31fe62299f6dbe3b8e03d4be 100644 (file)
@@ -14,8 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-#if !defined(__mc68020__) && !defined(__mc68030__) && \
-    !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
 /*
  * Simpler 68k and ColdFire parts also need a few other gcc functions.
  */
index f4d715cdca0e7d5dca002943d88a8aeafa255ea3..7dc4087a9545aaa9df48b92a18ab80bbead41469 100644 (file)
@@ -84,52 +84,52 @@ SECTIONS {
                /* Kernel symbol table: Normal symbols */
                . = ALIGN(4);
                __start___ksymtab = .;
-               *(__ksymtab)
+               *(SORT(___ksymtab+*))
                __stop___ksymtab = .;
 
                /* Kernel symbol table: GPL-only symbols */
                __start___ksymtab_gpl = .;
-               *(__ksymtab_gpl)
+               *(SORT(___ksymtab_gpl+*))
                __stop___ksymtab_gpl = .;
 
                /* Kernel symbol table: Normal unused symbols */
                __start___ksymtab_unused = .;
-               *(__ksymtab_unused)
+               *(SORT(___ksymtab_unused+*))
                __stop___ksymtab_unused = .;
 
                /* Kernel symbol table: GPL-only unused symbols */
                __start___ksymtab_unused_gpl = .;
-               *(__ksymtab_unused_gpl)
+               *(SORT(___ksymtab_unused_gpl+*))
                __stop___ksymtab_unused_gpl = .;
 
                /* Kernel symbol table: GPL-future symbols */
                __start___ksymtab_gpl_future = .;
-               *(__ksymtab_gpl_future)
+               *(SORT(___ksymtab_gpl_future+*))
                __stop___ksymtab_gpl_future = .;
 
                /* Kernel symbol table: Normal symbols */
                __start___kcrctab = .;
-               *(__kcrctab)
+               *(SORT(___kcrctab+*))
                __stop___kcrctab = .;
 
                /* Kernel symbol table: GPL-only symbols */
                __start___kcrctab_gpl = .;
-               *(__kcrctab_gpl)
+               *(SORT(___kcrctab_gpl+*))
                __stop___kcrctab_gpl = .;
 
                /* Kernel symbol table: Normal unused symbols */
                __start___kcrctab_unused = .;
-               *(__kcrctab_unused)
+               *(SORT(___kcrctab_unused+*))
                __stop___kcrctab_unused = .;
 
                /* Kernel symbol table: GPL-only unused symbols */
                __start___kcrctab_unused_gpl = .;
-               *(__kcrctab_unused_gpl)
+               *(SORT(___kcrctab_unused_gpl+*))
                __stop___kcrctab_unused_gpl = .;
 
                /* Kernel symbol table: GPL-future symbols */
                __start___kcrctab_gpl_future = .;
-               *(__kcrctab_gpl_future)
+               *(SORT(___kcrctab_gpl_future+*))
                __stop___kcrctab_gpl_future = .;
 
                /* Kernel symbol table: strings */
index 62182c81e91c55bf106df98d14a00e4dc950ccd9..064889316974756878ff3a5f969068ab71b5cc2a 100644 (file)
@@ -34,8 +34,10 @@ void *memcpy(void *to, const void *from, size_t n)
        if (temp) {
                long *lto = to;
                const long *lfrom = from;
-#if defined(__mc68020__) || defined(__mc68030__) || \
-    defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+               for (; temp; temp--)
+                       *lto++ = *lfrom++;
+#else
                asm volatile (
                        "       movel %2,%3\n"
                        "       andw  #7,%3\n"
@@ -56,9 +58,6 @@ void *memcpy(void *to, const void *from, size_t n)
                        "       jpl   4b"
                        : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
                        : "0" (lfrom), "1" (lto), "2" (temp));
-#else
-               for (; temp; temp--)
-                       *lto++ = *lfrom++;
 #endif
                to = lto;
                from = lfrom;
index f649e6a2e644686966e3b7c4ed01b245292c04c6..8a7639f0a2fe5c05d1ecc70065e15da88961db99 100644 (file)
@@ -32,8 +32,10 @@ void *memset(void *s, int c, size_t count)
        temp = count >> 2;
        if (temp) {
                long *ls = s;
-#if defined(__mc68020__) || defined(__mc68030__) || \
-    defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+               for (; temp; temp--)
+                       *ls++ = c;
+#else
                size_t temp1;
                asm volatile (
                        "       movel %1,%2\n"
@@ -55,9 +57,6 @@ void *memset(void *s, int c, size_t count)
                        "       jpl   1b"
                        : "=a" (ls), "=d" (temp), "=&d" (temp1)
                        : "d" (c), "0" (ls), "1" (temp));
-#else
-               for (; temp; temp--)
-                       *ls++ = c;
 #endif
                s = ls;
        }
index 079bafca073effea75f2af80847d1f174bf674f5..79e928a525d078169e0617270e1fcea9da08b5f2 100644 (file)
@@ -19,17 +19,7 @@ along with GNU CC; see the file COPYING.  If not, write to
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
-#if defined(__mc68020__) || defined(__mc68030__) || \
-    defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
-
-#define umul_ppmm(w1, w0, u, v) \
-  __asm__ ("mulu%.l %3,%1:%0"                                          \
-           : "=d" ((USItype)(w0)),                                     \
-             "=d" ((USItype)(w1))                                      \
-           : "%0" ((USItype)(u)),                                      \
-             "dmi" ((USItype)(v)))
-
-#else
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
 
 #define SI_TYPE_SIZE 32
 #define __BITS4 (SI_TYPE_SIZE / 4)
@@ -61,6 +51,15 @@ Boston, MA 02111-1307, USA.  */
     (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);         \
   } while (0)
 
+#else
+
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulu%.l %3,%1:%0"                                          \
+           : "=d" ((USItype)(w0)),                                     \
+             "=d" ((USItype)(w1))                                      \
+           : "%0" ((USItype)(u)),                                      \
+             "dmi" ((USItype)(v)))
+
 #endif
 
 #define __umulsidi3(u, v) \
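
Note on the hunk above: the predicate is now keyed on CONFIG_M68000 / CONFIG_COLDFIRE instead of compiler-defined CPU macros, so plain 68000 and ColdFire builds, which lack a 32x32->64 mulu.l instruction, take the generic half-word decomposition while 68020 and later keep the inline assembly. As a rough standalone illustration of that generic path (not part of the patch, and the function name is made up):

#include <stdint.h>

/* Multiply two 32-bit values into a 64-bit hi:lo pair using only
 * 16x16->32 multiplies, the same scheme the generic umul_ppmm() uses. */
static void umul32_sketch(uint32_t u, uint32_t v, uint32_t *hi, uint32_t *lo)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;

	uint32_t x0 = ul * vl;			/* low  * low           */
	uint32_t x1 = uh * vl;			/* cross terms, shifted */
	uint32_t x2 = ul * vh;			/*   by 16 bits         */
	uint32_t x3 = uh * vh;			/* high * high          */

	x1 += x0 >> 16;				/* cannot overflow */
	x1 += x2;
	if (x1 < x2)				/* carry out of x1 ... */
		x3 += 0x10000;			/* ... lands in x3     */

	*hi = x3 + (x1 >> 16);
	*lo = (x1 << 16) | (x0 & 0xffff);
}
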
index 37862b2ce3633a3f87f3aeb0ce3f8e119688a143..807c97eed8a8ed48a5957c144e907894c8d8c397 100644 (file)
@@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 CONFIG_RTC_DRV_TEST=m
 CONFIG_RTC_DRV_DS1307=m
index f03cb278828f400c1e66580be491c30da507e9b3..bd3e5e73826e1839b307d5ae7a279a3e93f6afbc 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs,
 
        case EXCEP_TRAP:
        case EXCEP_UNIMPINS:
-               if (get_user(opcode, (uint8_t __user *)regs->pc) != 0)
+               if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0)
                        break;
                if (opcode == 0xff) {
                        if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
index 6f702a6ab3955531ff31f65fb7ace7c37f01aa1f..13c4814c29f840dd4ecb95e4e7206da7127aef49 100644 (file)
@@ -44,6 +44,7 @@ SECTIONS
   RO_DATA(PAGE_SIZE)
 
   /* writeable */
+  _sdata = .;     /* Start of rw data section */
   RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
   _edata = .;
 
index 665919f2ab62ef3e8f43c4466f6f72fdd69988e9..a775ea5d7cee077cf37c5ceb90c8bfe773e4fd99 100644 (file)
@@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
        # conditionally purge this line in all ways
        mov     d1,(L1_CACHE_WAYDISP*0,a0)
 
-debugger_local_cache_flushinv_no_dcache:
+debugger_local_cache_flushinv_one_no_dcache:
        #
        # now try to flush the icache
        #
        mov     CHCTR,a0
        movhu   (a0),d0
        btst    CHCTR_ICEN,d0
-       beq     mn10300_local_icache_inv_range_reg_end
+       beq     debugger_local_cache_flushinv_one_end
 
        LOCAL_CLI_SAVE(d1)
 
index 3d80c3e9cf6003b2fe9bfe512c660a2961d81a32..12da77ec0228c419b8611675a6ddfce282cede81 100644 (file)
@@ -1,5 +1,4 @@
 addnote
-dtc
 empty.c
 hack-coff
 infblock.c
diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
deleted file mode 100644 (file)
index a7c3f94..0000000
--- a/arch/powerpc/boot/dtc-src/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-dtc-lexer.lex.c
-dtc-parser.tab.c
-dtc-parser.tab.h
index 7f7e4a8786029897df66ca0e49c64b2c6aa5065d..22e719575c60b33cb64cee5fcb840c2507f942c8 100644 (file)
@@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
 # CONFIG_USB_OHCI_HCD_PCI is not set
 CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
index 6472322bf13b6820af3c364c1a994baebe914fbe..185c292b0f1c8cab6968420212c9690bc895e6b3 100644 (file)
@@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
 # CONFIG_USB_EHCI_HCD_PPC_OF is not set
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PS3=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
index d902abd3399506cfcacc056645e68f6e5de3f263..b1d2deceeedbc1f24030a302e2f190ff8929084b 100644 (file)
@@ -14,7 +14,7 @@
 #define ASM_PPC_RIO_H
 
 extern void platform_rio_init(void);
-#ifdef CONFIG_RAPIDIO
+#ifdef CONFIG_FSL_RIO
 extern int fsl_rio_mcheck_exception(struct pt_regs *);
 #else
 static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
index 34d2722b9451f816b08405549a691d421ae7d81a..9fb933248ab69fe2272f447d1b286407dee5d195 100644 (file)
@@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .pvr_value              = 0x80240000,
                .cpu_name               = "e5500",
                .cpu_features           = CPU_FTRS_E5500,
-               .cpu_user_features      = COMMON_USER_BOOKE,
+               .cpu_user_features      = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
                .mmu_features           = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
                        MMU_FTR_USE_TLBILX,
                .icache_bsize           = 64,
index f2c906b1d8d3f77158d44f40d7bedec4be253cfc..8c3112a57cf25ec751a118d8550bc95ed4c92575 100644 (file)
@@ -82,11 +82,29 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
+/*
+ * overlaps_initrd - check for overlap with page aligned extension of
+ * initrd.
+ */
+static inline int overlaps_initrd(unsigned long start, unsigned long size)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (!initrd_start)
+               return 0;
+
+       return  (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+                       start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+#else
+       return 0;
+#endif
+}
+
 /**
  * move_device_tree - move tree to an unused area, if needed.
  *
  * The device tree may be allocated beyond our memory limit, or inside the
- * crash kernel region for kdump. If so, move it out of the way.
+ * crash kernel region for kdump, or within the page aligned range of initrd.
+ * If so, move it out of the way.
  */
 static void __init move_device_tree(void)
 {
@@ -99,7 +117,8 @@ static void __init move_device_tree(void)
        size = be32_to_cpu(initial_boot_params->totalsize);
 
        if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
-                       overlaps_crashkernel(start, size)) {
+                       overlaps_crashkernel(start, size) ||
+                       overlaps_initrd(start, size)) {
                p = __va(memblock_alloc(size, PAGE_SIZE));
                memcpy(p, initial_boot_params, size);
                initial_boot_params = (struct boot_param_header *)p;
@@ -555,7 +574,9 @@ static void __init early_reserve_mem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
        /* then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start))
-               memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
+               memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+                       _ALIGN_UP(initrd_end, PAGE_SIZE) -
+                       _ALIGN_DOWN(initrd_start, PAGE_SIZE));
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
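
On the prom.c hunks above: the initrd is now reserved on whole-page boundaries, and move_device_tree() additionally avoids that page-aligned extension, apparently so the flattened device tree can never end up sharing a page with memory that is later freed page-by-page as initrd. A minimal sketch of the overlap test, with made-up names and a fixed page size purely for illustration:

#define PAGE_SZ		4096UL
#define ALIGN_DOWN(x)	((x) & ~(PAGE_SZ - 1))
#define ALIGN_UP(x)	(((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

/* Does [start, start + size) touch the page-aligned extension of the
 * initrd image at [rd_start, rd_end)?  Mirrors overlaps_initrd() above. */
static int overlaps_aligned_initrd(unsigned long start, unsigned long size,
				   unsigned long rd_start, unsigned long rd_end)
{
	if (!rd_start)
		return 0;			/* no initrd at all */

	return (start + size) > ALIGN_DOWN(rd_start) &&
	       start <= ALIGN_UP(rd_end);
}
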
index d65b591e5556bd01a4ee4bb7b5238b43ee389f6a..5de0f254dbb5cb38fc39b2879fd2f6c40e36c3c6 100644 (file)
@@ -223,21 +223,6 @@ void free_initmem(void)
 #undef FREESEC
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       if (start < end)
-               printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-       for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(start));
-               init_page_count(virt_to_page(start));
-               free_page(start);
-               totalram_pages++;
-       }
-}
-#endif
-
-
 #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
index 6374b2196a17a33b97589fb8097fc0c00a1f176b..f6dbb4c20e645ad071d87e6f5244c214e565e667 100644 (file)
@@ -99,20 +99,6 @@ void free_initmem(void)
                ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       if (start < end)
-               printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-       for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(start));
-               init_page_count(virt_to_page(start));
-               free_page(start);
-               totalram_pages++;
-       }
-}
-#endif
-
 static void pgd_ctor(void *addr)
 {
        memset(addr, 0, PGD_TABLE_SIZE);
index 57e545b84bf199cb8db2b2fbefa0d4755a419394..29d4dde65c45f9b6f075d2d575fac1d05e2bbdc7 100644 (file)
@@ -382,6 +382,25 @@ void __init mem_init(void)
        mem_init_done = 1;
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+       if (start >= end)
+               return;
+
+       start = _ALIGN_DOWN(start, PAGE_SIZE);
+       end = _ALIGN_UP(end, PAGE_SIZE);
+       pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+
+       for (; start < end; start += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(start));
+               init_page_count(virt_to_page(start));
+               free_page(start);
+               totalram_pages++;
+       }
+}
+#endif
+
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean.  We do the i-cache
index 0608b1657da41f511ceca7a5bb4ef677ee14fbad..d917573cf1a854183d425053b479ed6423ae0c3c 100644 (file)
@@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
        out_be32(&lbc->lteccr, LTECCR_CLEAR);
        out_be32(&lbc->ltedr, LTEDR_ENABLE);
 
-       /* Enable interrupts for any detected events */
-       out_be32(&lbc->lteir, LTEIR_ENABLE);
-
        /* Set the monitor timeout value to the maximum for erratum A001 */
        if (of_device_is_compatible(node, "fsl,elbc"))
                clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
@@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
                goto err;
        }
 
+       /* Enable interrupts for any detected events */
+       out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
+
        return 0;
 
 err:
index 9fab2aa9c2c80d0bbf1fbf0f943b377de3583118..90d77bd078f51acc243b21ee0c63432d01369ee9 100644 (file)
@@ -89,6 +89,7 @@ config S390
        select HAVE_GET_USER_PAGES_FAST
        select HAVE_ARCH_MUTEX_CPU_RELAX
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+       select HAVE_RCU_TABLE_FREE if SMP
        select ARCH_INLINE_SPIN_TRYLOCK
        select ARCH_INLINE_SPIN_TRYLOCK_BH
        select ARCH_INLINE_SPIN_LOCK
index f6314af3b354ca4375547db504367bc547c351eb..38e71ebcd3c276c2483d5e378f4cc63f189c20a2 100644 (file)
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
-#define check_pgt_cache()      do {} while (0)
-
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
-void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mm_struct *, unsigned long *);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void __tlb_remove_table(void *_table);
+#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
index e4efacfe1b63a7ea936910e230f2e7a701729a76..801fbe1d837d32f04e4c3106da6cb85dae2c6ec9 100644 (file)
@@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-/* Page status table bits for virtualization */
-#define RCP_ACC_BITS   0xf000000000000000UL
-#define RCP_FP_BIT     0x0800000000000000UL
-#define RCP_PCL_BIT    0x0080000000000000UL
-#define RCP_HR_BIT     0x0040000000000000UL
-#define RCP_HC_BIT     0x0020000000000000UL
-#define RCP_GR_BIT     0x0004000000000000UL
-#define RCP_GC_BIT     0x0002000000000000UL
-
-/* User dirty / referenced bit for KVM's migration feature */
-#define KVM_UR_BIT     0x0000800000000000UL
-#define KVM_UC_BIT     0x0000400000000000UL
-
 #ifndef __s390x__
 
 /* Bits in the segment table address-space-control-element */
@@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY         (_SEGMENT_ENTRY_PTL)
 #define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INV)
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS   0xf0000000UL
+#define RCP_FP_BIT     0x08000000UL
+#define RCP_PCL_BIT    0x00800000UL
+#define RCP_HR_BIT     0x00400000UL
+#define RCP_HC_BIT     0x00200000UL
+#define RCP_GR_BIT     0x00040000UL
+#define RCP_GC_BIT     0x00020000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT     0x00008000UL
+#define KVM_UC_BIT     0x00004000UL
+
 #else /* __s390x__ */
 
 /* Bits in the segment/region table address-space-control-element */
@@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY_LARGE   0x400   /* STE-format control, large page   */
 #define _SEGMENT_ENTRY_CO      0x100   /* change-recording override   */
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS   0xf000000000000000UL
+#define RCP_FP_BIT     0x0800000000000000UL
+#define RCP_PCL_BIT    0x0080000000000000UL
+#define RCP_HR_BIT     0x0040000000000000UL
+#define RCP_HC_BIT     0x0020000000000000UL
+#define RCP_GR_BIT     0x0004000000000000UL
+#define RCP_GC_BIT     0x0002000000000000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT     0x0000800000000000UL
+#define KVM_UC_BIT     0x0000400000000000UL
+
 #endif /* __s390x__ */
 
 /*
index 350e7ee5952daaa025ed239189a1df2f78f9142d..15c97625df8d3f0411c17a44c999b117ef1ff49d 100644 (file)
@@ -139,110 +139,47 @@ struct slib {
        struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(2048)));
 
-/**
- * struct sbal_flags - storage block address list flags
- * @last: last entry
- * @cont: contiguous storage
- * @frag: fragmentation
- */
-struct sbal_flags {
-       u8      : 1;
-       u8 last : 1;
-       u8 cont : 1;
-       u8      : 1;
-       u8 frag : 2;
-       u8      : 2;
-} __attribute__ ((packed));
-
-#define SBAL_FLAGS_FIRST_FRAG          0x04000000UL
-#define SBAL_FLAGS_MIDDLE_FRAG         0x08000000UL
-#define SBAL_FLAGS_LAST_FRAG           0x0c000000UL
-#define SBAL_FLAGS_LAST_ENTRY          0x40000000UL
-#define SBAL_FLAGS_CONTIGUOUS          0x20000000UL
+#define SBAL_EFLAGS_LAST_ENTRY         0x40
+#define SBAL_EFLAGS_CONTIGUOUS         0x20
+#define SBAL_EFLAGS_FIRST_FRAG         0x04
+#define SBAL_EFLAGS_MIDDLE_FRAG                0x08
+#define SBAL_EFLAGS_LAST_FRAG          0x0c
+#define SBAL_EFLAGS_MASK               0x6f
 
-#define SBAL_FLAGS0_DATA_CONTINUATION  0x20UL
+#define SBAL_SFLAGS0_PCI_REQ           0x40
+#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
 
 /* Awesome OpenFCP extensions */
-#define SBAL_FLAGS0_TYPE_STATUS                0x00UL
-#define SBAL_FLAGS0_TYPE_WRITE         0x08UL
-#define SBAL_FLAGS0_TYPE_READ          0x10UL
-#define SBAL_FLAGS0_TYPE_WRITE_READ    0x18UL
-#define SBAL_FLAGS0_MORE_SBALS         0x04UL
-#define SBAL_FLAGS0_COMMAND            0x02UL
-#define SBAL_FLAGS0_LAST_SBAL          0x00UL
-#define SBAL_FLAGS0_ONLY_SBAL          SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_MIDDLE_SBAL                SBAL_FLAGS0_MORE_SBALS
-#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_PCI                        0x40
-
-/**
- * struct sbal_sbalf_0 - sbal flags for sbale 0
- * @pci: PCI indicator
- * @cont: data continuation
- * @sbtype: storage-block type (FCP)
- */
-struct sbal_sbalf_0 {
-       u8        : 1;
-       u8 pci    : 1;
-       u8 cont   : 1;
-       u8 sbtype : 2;
-       u8        : 3;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_1 - sbal flags for sbale 1
- * @key: storage key
- */
-struct sbal_sbalf_1 {
-       u8     : 4;
-       u8 key : 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_14 - sbal flags for sbale 14
- * @erridx: error index
- */
-struct sbal_sbalf_14 {
-       u8        : 4;
-       u8 erridx : 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_15 - sbal flags for sbale 15
- * @reason: reason for error state
- */
-struct sbal_sbalf_15 {
-       u8 reason;
-} __attribute__ ((packed));
-
-/**
- * union sbal_sbalf - storage block address list flags
- * @i0: sbalf0
- * @i1: sbalf1
- * @i14: sbalf14
- * @i15: sblaf15
- * @value: raw value
- */
-union sbal_sbalf {
-       struct sbal_sbalf_0  i0;
-       struct sbal_sbalf_1  i1;
-       struct sbal_sbalf_14 i14;
-       struct sbal_sbalf_15 i15;
-       u8 value;
-};
+#define SBAL_SFLAGS0_TYPE_STATUS       0x00
+#define SBAL_SFLAGS0_TYPE_WRITE                0x08
+#define SBAL_SFLAGS0_TYPE_READ         0x10
+#define SBAL_SFLAGS0_TYPE_WRITE_READ   0x18
+#define SBAL_SFLAGS0_MORE_SBALS                0x04
+#define SBAL_SFLAGS0_COMMAND           0x02
+#define SBAL_SFLAGS0_LAST_SBAL         0x00
+#define SBAL_SFLAGS0_ONLY_SBAL         SBAL_SFLAGS0_COMMAND
+#define SBAL_SFLAGS0_MIDDLE_SBAL       SBAL_SFLAGS0_MORE_SBALS
+#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
 
 /**
  * struct qdio_buffer_element - SBAL entry
- * @flags: flags
+ * @eflags: SBAL entry flags
+ * @scount: SBAL count
+ * @sflags: whole SBAL flags
  * @length: length
  * @addr: address
 */
 struct qdio_buffer_element {
-       u32 flags;
+       u8 eflags;
+       /* private: */
+       u8 res1;
+       /* public: */
+       u8 scount;
+       u8 sflags;
        u32 length;
 #ifdef CONFIG_32BIT
        /* private: */
-       void *reserved;
+       void *res2;
        /* public: */
 #endif
        void *addr;
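
The qdio.h hunk above splits the old 32-bit flags word of a storage block address list entry into individual bytes: eflags carries the per-entry fragment/last-entry bits, sflags carries the SBALE-0 request bits, and scount is the new SBAL count byte. A hypothetical caller under the new layout (not taken from any in-tree driver, and relying only on the definitions shown above) might look like:

static void fill_first_sbale(struct qdio_buffer_element *sbale,
			     void *data, u32 len)
{
	sbale->addr   = data;
	sbale->length = len;
	sbale->eflags = SBAL_EFLAGS_FIRST_FRAG;
	sbale->sflags = SBAL_SFLAGS0_TYPE_WRITE_READ | SBAL_SFLAGS0_COMMAND;
}
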
index 77eee5477a52fa8ce8404d8c6fd8a4fdfdac51f5..c687a2c834626adb1f01cc24ae4f62713d83adaa 100644 (file)
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
-#include <asm/smp.h>
 #include <asm/tlbflush.h>
 
 struct mmu_gather {
        struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       struct mmu_table_batch *batch;
+#endif
        unsigned int fullmm;
-       unsigned int nr_ptes;
-       unsigned int nr_pxds;
-       unsigned int max;
-       void **array;
-       void *local[8];
+       unsigned int need_flush;
 };
 
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-       unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+struct mmu_table_batch {
+       struct rcu_head         rcu;
+       unsigned int            nr;
+       void                    *tables[0];
+};
 
-       if (addr) {
-               tlb->array = (void *) addr;
-               tlb->max = PAGE_SIZE / sizeof(void *);
-       }
-}
+#define MAX_TABLE_BATCH                \
+       ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
                                  struct mm_struct *mm,
                                  unsigned int full_mm_flush)
 {
        tlb->mm = mm;
-       tlb->max = ARRAY_SIZE(tlb->local);
-       tlb->array = tlb->local;
        tlb->fullmm = full_mm_flush;
+       tlb->need_flush = 0;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb->batch = NULL;
+#endif
        if (tlb->fullmm)
                __tlb_flush_mm(mm);
-       else
-               __tlb_alloc_page(tlb);
-       tlb->nr_ptes = 0;
-       tlb->nr_pxds = tlb->max;
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
-               __tlb_flush_mm(tlb->mm);
-       while (tlb->nr_ptes > 0)
-               page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
-       while (tlb->nr_pxds < tlb->max)
-               crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
+       if (!tlb->need_flush)
+               return;
+       tlb->need_flush = 0;
+       __tlb_flush_mm(tlb->mm);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
        tlb_flush_mmu(tlb);
-
-       rcu_table_freelist_finish();
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-
-       if (tlb->array != tlb->local)
-               free_pages((unsigned long) tlb->array, 0);
 }
 
 /*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
 {
-       if (!tlb->fullmm) {
-               tlb->array[tlb->nr_ptes++] = pte;
-               if (tlb->nr_ptes >= tlb->nr_pxds)
-                       tlb_flush_mmu(tlb);
-       } else
-               page_table_free(tlb->mm, (unsigned long *) pte);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       if (!tlb->fullmm)
+               return page_table_free_rcu(tlb, (unsigned long *) pte);
+#endif
+       page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 31))
                return;
-       if (!tlb->fullmm) {
-               tlb->array[--tlb->nr_pxds] = pmd;
-               if (tlb->nr_ptes >= tlb->nr_pxds)
-                       tlb_flush_mmu(tlb);
-       } else
-               crst_table_free(tlb->mm, (unsigned long *) pmd);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       if (!tlb->fullmm)
+               return tlb_remove_table(tlb, pmd);
+#endif
+       crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
 
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 42))
                return;
-       if (!tlb->fullmm) {
-               tlb->array[--tlb->nr_pxds] = pud;
-               if (tlb->nr_ptes >= tlb->nr_pxds)
-                       tlb_flush_mmu(tlb);
-       } else
-               crst_table_free(tlb->mm, (unsigned long *) pud);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       if (!tlb->fullmm)
+               return tlb_remove_table(tlb, pud);
+#endif
+       crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
 
index 30ca85cce3147f89b70cb2ba991c20e1275a1a5a..67345ae7ce8d967a6e952070e2969d10ad7dcd5e 100644 (file)
@@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
+       facilities[1] &= 0x201c000000000000ULL;
        return 0;
 }
 
index ab0e041ac54cf2b17fc0b631bf0ed6cd4f59fd53..5faa1b1b23fa78f19d8f399f7681e3573ade6ee7 100644 (file)
@@ -93,4 +93,6 @@ sie_err:
 
        .section __ex_table,"a"
        .quad   sie_inst,sie_err
+       .quad   sie_exit,sie_err
+       .quad   sie_reenter,sie_err
        .previous
index b09763fe5da1a5385f1b77c17f5949c0ffdd592d..37a23c22370576415441e7542728b79a233f2f58 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-struct rcu_table_freelist {
-       struct rcu_head rcu;
-       struct mm_struct *mm;
-       unsigned int pgt_index;
-       unsigned int crst_index;
-       unsigned long *table[0];
-};
-
-#define RCU_FREELIST_SIZE \
-       ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
-         / sizeof(unsigned long))
-
-static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
-
-static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-
-static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
-{
-       struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
-       struct rcu_table_freelist *batch = *batchp;
-
-       if (batch)
-               return batch;
-       batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
-       if (batch) {
-               batch->mm = mm;
-               batch->pgt_index = 0;
-               batch->crst_index = RCU_FREELIST_SIZE;
-               *batchp = batch;
-       }
-       return batch;
-}
-
-static void rcu_table_freelist_callback(struct rcu_head *head)
-{
-       struct rcu_table_freelist *batch =
-               container_of(head, struct rcu_table_freelist, rcu);
-
-       while (batch->pgt_index > 0)
-               __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
-       while (batch->crst_index < RCU_FREELIST_SIZE)
-               crst_table_free(batch->mm, batch->table[batch->crst_index++]);
-       free_page((unsigned long) batch);
-}
-
-void rcu_table_freelist_finish(void)
-{
-       struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
-       struct rcu_table_freelist *batch = *batchp;
-
-       if (!batch)
-               goto out;
-       call_rcu(&batch->rcu, rcu_table_freelist_callback);
-       *batchp = NULL;
-out:
-       put_cpu_var(rcu_table_freelist);
-}
-
-static void smp_sync(void *arg)
-{
-}
-
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER    1
-#define TABLES_PER_PAGE        4
-#define FRAG_MASK      15UL
-#define SECOND_HALVES  10UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-       memset(table + 256, 0, PAGE_SIZE/4);
-       clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-       memset(table + 768, 0, PAGE_SIZE/4);
-}
-
+#define FRAG_MASK      0x0f
 #else
 #define ALLOC_ORDER    2
-#define TABLES_PER_PAGE        2
-#define FRAG_MASK      3UL
-#define SECOND_HALVES  2UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-       memset(table + 256, 0, PAGE_SIZE/2);
-}
-
+#define FRAG_MASK      0x03
 #endif
 
 unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
        free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
-{
-       struct rcu_table_freelist *batch;
-
-       preempt_disable();
-       if (atomic_read(&mm->mm_users) < 2 &&
-           cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-               crst_table_free(mm, table);
-               goto out;
-       }
-       batch = rcu_table_freelist_get(mm);
-       if (!batch) {
-               smp_call_function(smp_sync, NULL, 1);
-               crst_table_free(mm, table);
-               goto out;
-       }
-       batch->table[--batch->crst_index] = table;
-       if (batch->pgt_index >= batch->crst_index)
-               rcu_table_freelist_finish();
-out:
-       preempt_enable();
-}
-
 #ifdef CONFIG_64BIT
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 {
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 }
 #endif
 
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+       unsigned int old, new;
+
+       do {
+               old = atomic_read(v);
+               new = old ^ bits;
+       } while (atomic_cmpxchg(v, old, new) != old);
+       return new;
+}
+
 /*
  * page table entry allocation/free routines.
  */
+#ifdef CONFIG_PGSTE
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+{
+       struct page *page;
+       unsigned long *table;
+
+       page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+       if (!page)
+               return NULL;
+       pgtable_page_ctor(page);
+       atomic_set(&page->_mapcount, 3);
+       table = (unsigned long *) page_to_phys(page);
+       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+       clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+       return table;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+       struct page *page;
+
+       page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+       pgtable_page_ctor(page);
+       atomic_set(&page->_mapcount, -1);
+       __free_page(page);
+}
+#endif
+
 unsigned long *page_table_alloc(struct mm_struct *mm)
 {
        struct page *page;
        unsigned long *table;
-       unsigned long bits;
+       unsigned int mask, bit;
 
-       bits = (mm->context.has_pgste) ? 3UL : 1UL;
+#ifdef CONFIG_PGSTE
+       if (mm_has_pgste(mm))
+               return page_table_alloc_pgste(mm);
+#endif
+       /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
-       page = NULL;
+       mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
-               if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-                       page = NULL;
+               table = (unsigned long *) page_to_phys(page);
+               mask = atomic_read(&page->_mapcount);
+               mask = mask | (mask >> 4);
        }
-       if (!page) {
+       if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
-               page->flags &= ~FRAG_MASK;
+               atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
-               if (mm->context.has_pgste)
-                       clear_table_pgstes(table);
-               else
-                       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+               clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
+       } else {
+               for (bit = 1; mask & bit; bit <<= 1)
+                       table += PTRS_PER_PTE;
+               mask = atomic_xor_bits(&page->_mapcount, bit);
+               if ((mask & FRAG_MASK) == FRAG_MASK)
+                       list_del(&page->lru);
        }
-       table = (unsigned long *) page_to_phys(page);
-       while (page->flags & bits) {
-               table += 256;
-               bits <<= 1;
-       }
-       page->flags |= bits;
-       if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
-               list_move_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        return table;
 }
 
-static void __page_table_free(struct mm_struct *mm, unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
        struct page *page;
-       unsigned long bits;
+       unsigned int bit, mask;
 
-       bits = ((unsigned long) table) & 15;
-       table = (unsigned long *)(((unsigned long) table) ^ bits);
+#ifdef CONFIG_PGSTE
+       if (mm_has_pgste(mm))
+               return page_table_free_pgste(table);
+#endif
+       /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-       page->flags ^= bits;
-       if (!(page->flags & FRAG_MASK)) {
+       bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
+       spin_lock_bh(&mm->context.list_lock);
+       if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+               list_del(&page->lru);
+       mask = atomic_xor_bits(&page->_mapcount, bit);
+       if (mask & FRAG_MASK)
+               list_add(&page->lru, &mm->context.pgtable_list);
+       spin_unlock_bh(&mm->context.list_lock);
+       if (mask == 0) {
                pgtable_page_dtor(page);
+               atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
 }
 
-void page_table_free(struct mm_struct *mm, unsigned long *table)
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+static void __page_table_free_rcu(void *table, unsigned bit)
 {
        struct page *page;
-       unsigned long bits;
 
-       bits = (mm->context.has_pgste) ? 3UL : 1UL;
-       bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#ifdef CONFIG_PGSTE
+       if (bit == FRAG_MASK)
+               return page_table_free_pgste(table);
+#endif
+       /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-       spin_lock_bh(&mm->context.list_lock);
-       page->flags ^= bits;
-       if (page->flags & FRAG_MASK) {
-               /* Page now has some free pgtable fragments. */
-               if (!list_empty(&page->lru))
-                       list_move(&page->lru, &mm->context.pgtable_list);
-               page = NULL;
-       } else
-               /* All fragments of the 4K page have been freed. */
-               list_del(&page->lru);
-       spin_unlock_bh(&mm->context.list_lock);
-       if (page) {
+       if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
+               atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
 }
 
-void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 {
-       struct rcu_table_freelist *batch;
+       struct mm_struct *mm;
        struct page *page;
-       unsigned long bits;
+       unsigned int bit, mask;
 
-       preempt_disable();
-       if (atomic_read(&mm->mm_users) < 2 &&
-           cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-               page_table_free(mm, table);
-               goto out;
-       }
-       batch = rcu_table_freelist_get(mm);
-       if (!batch) {
-               smp_call_function(smp_sync, NULL, 1);
-               page_table_free(mm, table);
-               goto out;
+       mm = tlb->mm;
+#ifdef CONFIG_PGSTE
+       if (mm_has_pgste(mm)) {
+               table = (unsigned long *) (__pa(table) | FRAG_MASK);
+               tlb_remove_table(tlb, table);
+               return;
        }
-       bits = (mm->context.has_pgste) ? 3UL : 1UL;
-       bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+#endif
+       bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
-       /* Delayed freeing with rcu prevents reuse of pgtable fragments */
-       list_del_init(&page->lru);
+       if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
+               list_del(&page->lru);
+       mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
+       if (mask & FRAG_MASK)
+               list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
-       table = (unsigned long *)(((unsigned long) table) | bits);
-       batch->table[batch->pgt_index++] = table;
-       if (batch->pgt_index >= batch->crst_index)
-               rcu_table_freelist_finish();
-out:
-       preempt_enable();
+       table = (unsigned long *) (__pa(table) | (bit << 4));
+       tlb_remove_table(tlb, table);
 }
 
+void __tlb_remove_table(void *_table)
+{
+       void *table = (void *)((unsigned long) _table & PAGE_MASK);
+       unsigned type = (unsigned long) _table & ~PAGE_MASK;
+
+       if (type)
+               __page_table_free_rcu(table, type);
+       else
+               free_pages((unsigned long) table, ALLOC_ORDER);
+}
+
+#endif
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
                return -EINVAL;
 
        /* Do we have pgstes? if yes, we are done */
-       if (tsk->mm->context.has_pgste)
+       if (mm_has_pgste(tsk->mm))
                return 0;
 
        /* lets check if we are allowed to replace the mm */
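
One small building block introduced in the pgtable.c hunks above, atomic_xor_bits(), is an atomic read-modify-write built from a compare-and-swap loop; it is what lets page_table_alloc() and page_table_free() track which 1K/2K fragments of a 4K page are in use through bits in page->_mapcount. A user-space analogue in C11 atomics, for illustration only:

#include <stdatomic.h>

/* Toggle "bits" in *v atomically and return the resulting value. */
static unsigned int atomic_xor_bits_sketch(atomic_uint *v, unsigned int bits)
{
	unsigned int old = atomic_load(v);
	unsigned int new;

	do {
		new = old ^ bits;
		/* on failure, "old" is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;
}
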
index 74495a5ea02738f8cd90fa1536a7daefb0c88be9..f03338c2f0886bbb830973f15e4a929d19981e6d 100644 (file)
@@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT
 
 config NO_IOPORT
        def_bool !PCI
-       depends on !SH_CAYMAN && !SH_SH4202_MICRODEV
+       depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
 
 config IO_TRAPPED
        bool
index 618bd566cf53bfe70b85b825072e0e7fa2b8aab5..969421f64a150a777b6a7f97e1d7b69aff4e1534 100644 (file)
@@ -359,37 +359,31 @@ static struct soc_camera_link camera_link = {
        .priv           = &camera_info,
 };
 
-static void dummy_release(struct device *dev)
+static struct platform_device *camera_device;
+
+static void ap325rxa_camera_release(struct device *dev)
 {
+       soc_camera_platform_release(&camera_device);
 }
 
-static struct platform_device camera_device = {
-       .name           = "soc_camera_platform",
-       .dev            = {
-               .platform_data  = &camera_info,
-               .release        = dummy_release,
-       },
-};
-
 static int ap325rxa_camera_add(struct soc_camera_link *icl,
                               struct device *dev)
 {
-       if (icl != &camera_link || camera_probe() <= 0)
-               return -ENODEV;
+       int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
+                                         ap325rxa_camera_release, 0);
+       if (ret < 0)
+               return ret;
 
-       camera_info.dev = dev;
+       ret = camera_probe();
+       if (ret < 0)
+               soc_camera_platform_del(icl, camera_device, &camera_link);
 
-       return platform_device_register(&camera_device);
+       return ret;
 }
 
 static void ap325rxa_camera_del(struct soc_camera_link *icl)
 {
-       if (icl != &camera_link)
-               return;
-
-       platform_device_unregister(&camera_device);
-       memset(&camera_device.dev.kobj, 0,
-              sizeof(camera_device.dev.kobj));
+       soc_camera_platform_del(icl, camera_device, &camera_link);
 }
 #endif /* CONFIG_I2C */
 
index bb13d0e1b964cc912de97ff9579cd26ca72de3f2..513cb1a2e6c830bc2d25cf27cf485ff8b37c37f8 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/usb/renesas_usbhs.h>
 #include <linux/i2c.h>
 #include <linux/i2c/tsc2007.h>
 #include <linux/spi/spi.h>
@@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = {
        .resource       = usb1_common_resources,
 };
 
+/*
+ * USBHS
+ */
+static int usbhs_get_id(struct platform_device *pdev)
+{
+       return gpio_get_value(GPIO_PTB3);
+}
+
+static struct renesas_usbhs_platform_info usbhs_info = {
+       .platform_callback = {
+               .get_id         = usbhs_get_id,
+       },
+       .driver_param = {
+               .buswait_bwait          = 4,
+               .detection_delay        = 5,
+       },
+};
+
+static struct resource usbhs_resources[] = {
+       [0] = {
+               .start  = 0xa4d90000,
+               .end    = 0xa4d90124 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 66,
+               .end    = 66,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usbhs_device = {
+       .name   = "renesas_usbhs",
+       .id     = 1,
+       .dev = {
+               .dma_mask               = NULL,         /*  not use dma */
+               .coherent_dma_mask      = 0xffffffff,
+               .platform_data          = &usbhs_info,
+       },
+       .num_resources  = ARRAY_SIZE(usbhs_resources),
+       .resource       = usbhs_resources,
+       .archdata = {
+               .hwblk_id = HWBLK_USB1,
+       },
+};
+
 /* LCDC */
 const static struct fb_videomode ecovec_lcd_modes[] = {
        {
@@ -885,6 +932,9 @@ static struct platform_device sh_mmcif_device = {
        },
        .num_resources  = ARRAY_SIZE(sh_mmcif_resources),
        .resource       = sh_mmcif_resources,
+       .archdata = {
+               .hwblk_id = HWBLK_MMC,
+       },
 };
 #endif
 
@@ -894,6 +944,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
        &sh_eth_device,
        &usb0_host_device,
        &usb1_common_device,
+       &usbhs_device,
        &lcdc_device,
        &ceu0_device,
        &ceu1_device,
index 780e083e4d17fe6e2f55edf7793d24d57cb1559b..23bc849d9c64a2274924cbab5f29462c22402103 100644 (file)
@@ -27,8 +27,6 @@ IMAGE_OFFSET  := $(shell /bin/bash -c 'printf "0x%08x" \
                        $(CONFIG_BOOT_LINK_OFFSET)]')
 endif
 
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
 ifeq ($(CONFIG_MCOUNT),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@@ -37,7 +35,25 @@ endif
 LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
                   -T $(obj)/../../kernel/vmlinux.lds
 
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
+#
+# Pull in the necessary libgcc bits from the in-kernel implementation.
+#
+lib1funcs-$(CONFIG_SUPERH32)   := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
+                                  lshrsi3.S
+lib1funcs-obj                  := \
+       $(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
+
+lib1funcs-dir          := $(srctree)/arch/$(SRCARCH)/lib
+ifeq ($(BITS),64)
+       lib1funcs-dir   := $(addsuffix $(BITS), $(lib1funcs-dir))
+endif
+
+KBUILD_CFLAGS += -I$(lib1funcs-dir)
+
+$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
+       $(call cmd,shipped)
+
+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
        $(call if_changed,ld)
        @:
 
index 0f558914e7604cc56a6822eeb2a2dec09c510abc..e2cbd92d520b347ca8e695cb2a8392dc72ac2794 100644 (file)
@@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_PL2303=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SH=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
index 4676bf57693a0ec3daf573d885731cfe7ed21963..f848dec9e483de8ed72cf3240c733d52f0da38e5 100644 (file)
@@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
                "   mov.l   %2,   @%1     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
                : "=&r" (retval),
-                 "+r"  (m)
-               : "r"   (val)
+                 "+r"  (m),
+                 "+r"  (val)           /* inhibit r15 overloading */
+               :
                : "memory", "r0", "r1");
 
        return retval;
@@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
                "   mov.b   %2,   @%1     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
                : "=&r" (retval),
-                 "+r"  (m)
-               : "r"   (val)
+                 "+r"  (m),
+                 "+r"  (val)           /* inhibit r15 overloading */
+               :
                : "memory" , "r0", "r1");
 
        return retval;
@@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                "   nop                   \n\t"
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
                "   mov    #-8,   r15     \n\t" /* LOGIN */
-               "   mov.l  @%1,   %0      \n\t" /* load  old value */
-               "   cmp/eq  %0,   %2      \n\t"
+               "   mov.l  @%3,   %0      \n\t" /* load  old value */
+               "   cmp/eq  %0,   %1      \n\t"
                "   bf            1f      \n\t" /* if not equal */
-               "   mov.l   %3,   @%1     \n\t" /* store new value */
+               "   mov.l   %2,   @%3     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-               : "=&r" (retval)
-               :  "r"  (m), "r"  (old), "r"  (new)
+               : "=&r" (retval),
+                 "+r"  (old), "+r"  (new) /* old or new can be r15 */
+               :  "r"  (m)
                : "memory" , "r0", "r1", "t");
 
        return retval;
index db85916b9e95aa8def960feff9b7b9ca1a5549f4..9210e93a92c337f18c94fd962bbeb5e23cfbb739 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/pgtable-2level.h>
 #endif
 #include <asm/page.h>
+#include <asm/mmu.h>
 
 #ifndef __ASSEMBLY__
 #include <asm/addrspace.h>
index 2a541ddb5a1b2f907a00b6e4764a6f2ecc3bfafa..e25c4c7d6b63171221e247cea5f6ee45136efaf7 100644 (file)
@@ -150,7 +150,6 @@ struct thread_struct {
 #define SR_USER (SR_MMU | SR_FD)
 
 #define start_thread(_regs, new_pc, new_sp)                    \
-       set_fs(USER_DS);                                        \
        _regs->sr = SR_USER;    /* User mode. */                \
        _regs->pc = new_pc - 4; /* Compensate syscall exit */   \
        _regs->pc |= 1;         /* Set SHmedia ! */             \
index 40725b4a80186a773cc4f72371cc77e9f3b0cd73..88bd6be168a9c3c4ceb1c2494930a858958473f1 100644 (file)
@@ -41,7 +41,9 @@
 
 #define user_mode(regs)                        (((regs)->sr & 0x40000000)==0)
 #define kernel_stack_pointer(_regs)    ((unsigned long)(_regs)->regs[15])
-#define GET_USP(regs) ((regs)->regs[15])
+
+#define GET_FP(regs)   ((regs)->regs[14])
+#define GET_USP(regs)  ((regs)->regs[15])
 
 extern void show_regs(struct pt_regs *);
 
@@ -131,7 +133,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi,
 
 static inline unsigned long profile_pc(struct pt_regs *regs)
 {
-       unsigned long pc = instruction_pointer(regs);
+       unsigned long pc = regs->pc;
 
        if (virt_addr_uncached(pc))
                return CAC_ADDR(pc);
index 6c308d8b9a50a96335a170fb13aa8069f619782a..ec88bfcdf7ce57202e1402a03e491c01ef97af28 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/pagemap.h>
 
 #ifdef CONFIG_MMU
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
index 7a5b8a331b4aa55f6d7141486fd6153491c4a4a6..bd0622788d64a83ccb7f57799cb16c3143360cc9 100644 (file)
@@ -236,6 +236,7 @@ enum {
 };
 
 enum {
+       SHDMA_SLAVE_INVALID,
        SHDMA_SLAVE_SCIF0_TX,
        SHDMA_SLAVE_SCIF0_RX,
        SHDMA_SLAVE_SCIF1_TX,
index 7eb435999426e1979a5127942a4686114626e724..cbc47e6bcab5474b4c675af5600da0c66da1883e 100644 (file)
@@ -285,6 +285,7 @@ enum {
 };
 
 enum {
+       SHDMA_SLAVE_INVALID,
        SHDMA_SLAVE_SCIF0_TX,
        SHDMA_SLAVE_SCIF0_RX,
        SHDMA_SLAVE_SCIF1_TX,
@@ -297,6 +298,14 @@ enum {
        SHDMA_SLAVE_SCIF4_RX,
        SHDMA_SLAVE_SCIF5_TX,
        SHDMA_SLAVE_SCIF5_RX,
+       SHDMA_SLAVE_USB0D0_TX,
+       SHDMA_SLAVE_USB0D0_RX,
+       SHDMA_SLAVE_USB0D1_TX,
+       SHDMA_SLAVE_USB0D1_RX,
+       SHDMA_SLAVE_USB1D0_TX,
+       SHDMA_SLAVE_USB1D0_RX,
+       SHDMA_SLAVE_USB1D1_TX,
+       SHDMA_SLAVE_USB1D1_RX,
        SHDMA_SLAVE_SDHI0_TX,
        SHDMA_SLAVE_SDHI0_RX,
        SHDMA_SLAVE_SDHI1_TX,
index 05b8196c77539a647650311d9006a2d3e04b7c9e..41f9f8b9db735164fb9755d874f10576229968a5 100644 (file)
@@ -252,6 +252,7 @@ enum {
 };
 
 enum {
+       SHDMA_SLAVE_INVALID,
        SHDMA_SLAVE_SDHI_TX,
        SHDMA_SLAVE_SDHI_RX,
        SHDMA_SLAVE_MMCIF_TX,
index 0333fe9e3881913e82c5458b45e4f6aef5d867dd..134a397b1918e0898c6f839503fe0d9a4411c08c 100644 (file)
@@ -92,6 +92,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
                .addr           = 0xa4e50024,
                .chcr           = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x36,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB0D0_TX,
+               .addr           = 0xA4D80100,
+               .chcr           = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0x73,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB0D0_RX,
+               .addr           = 0xA4D80100,
+               .chcr           = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0x73,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB0D1_TX,
+               .addr           = 0xA4D80120,
+               .chcr           = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0x77,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB0D1_RX,
+               .addr           = 0xA4D80120,
+               .chcr           = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0x77,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB1D0_TX,
+               .addr           = 0xA4D90100,
+               .chcr           = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0xab,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB1D0_RX,
+               .addr           = 0xA4D90100,
+               .chcr           = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0xab,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB1D1_TX,
+               .addr           = 0xA4D90120,
+               .chcr           = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0xaf,
+       }, {
+               .slave_id       = SHDMA_SLAVE_USB1D1_RX,
+               .addr           = 0xA4D90120,
+               .chcr           = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+               .mid_rid        = 0xaf,
        }, {
                .slave_id       = SHDMA_SLAVE_SDHI0_TX,
                .addr           = 0x04ce0030,
index 762a13984bbd76c897963c10f6513809d0cc345f..aaf6d59c201227b52c431e6be891868d756a5fda 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/prefetch.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/system.h>
@@ -101,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread);
 void start_thread(struct pt_regs *regs, unsigned long new_pc,
                  unsigned long new_sp)
 {
-       set_fs(USER_DS);
-
        regs->pr = 0;
        regs->sr = SR_FD;
        regs->pc = new_pc;
index 52411462c4096f973243e5c05acc9ebfc357819a..115725198038da862a19ef634a0ec38adf23b76f 100644 (file)
@@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 {
        unsigned int cache_type = (unsigned int)file->private;
        struct cache_info *cache;
-       unsigned int waysize, way, cache_size;
-       unsigned long ccr, base;
-       static unsigned long addrstart = 0;
+       unsigned int waysize, way;
+       unsigned long ccr;
+       unsigned long addrstart = 0;
 
        /*
         * Go uncached immediately so we don't skew the results any
@@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter)
        }
 
        if (cache_type == CACHE_TYPE_DCACHE) {
-               base = CACHE_OC_ADDRESS_ARRAY;
+               addrstart = CACHE_OC_ADDRESS_ARRAY;
                cache = &current_cpu_data.dcache;
        } else {
-               base = CACHE_IC_ADDRESS_ARRAY;
+               addrstart = CACHE_IC_ADDRESS_ARRAY;
                cache = &current_cpu_data.icache;
        }
 
-       /*
-        * Due to the amount of data written out (depending on the cache size),
-        * we may be iterated over multiple times. In this case, keep track of
-        * the entry position in addrstart, and rewind it when we've hit the
-        * end of the cache.
-        *
-        * Likewise, the same code is used for multiple caches, so care must
-        * be taken for bouncing addrstart back and forth so the appropriate
-        * cache is hit.
-        */
-       cache_size = cache->ways * cache->sets * cache->linesz;
-       if (((addrstart & 0xff000000) != base) ||
-            (addrstart & 0x00ffffff) > cache_size)
-               addrstart = base;
-
        waysize = cache->sets;
 
        /*
index 40733a9524021d42d42ddc5a8d7e08ef77a1c155..f251b5f27652f3aa278c3789d198e88c841c3938 100644 (file)
@@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        void *addr;
 
        addr = __in_29bit_mode() ?
-              (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
+              (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
 
        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
index af32e17fa170c3d9fa1ef259e4fd7cee0ad48f12..253986bd6bb625a270ae1f27b99d967650b178c4 100644 (file)
@@ -26,7 +26,6 @@ config SPARC
        select HAVE_DMA_API_DEBUG
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_GENERIC_HARDIRQS
-       select GENERIC_HARDIRQS_NO_DEPRECATED
        select GENERIC_IRQ_SHOW
        select USE_GENERIC_SMP_HELPERS if SMP
 
@@ -528,6 +527,23 @@ config PCI_DOMAINS
 config PCI_SYSCALL
        def_bool PCI
 
+config PCIC_PCI
+       bool
+       depends on PCI && SPARC32 && !SPARC_LEON
+       default y
+
+config LEON_PCI
+       bool
+       depends on PCI && SPARC_LEON
+       default y
+
+config GRPCI2
+       bool "GRPCI2 Host Bridge Support"
+       depends on LEON_PCI
+       default y
+       help
+         Say Y here to include the GRPCI2 Host Bridge Driver.
+
 source "drivers/pci/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
index 482c79e2a41685343025cb9514eef7e205b2f637..7440915e86d8d970216a944b32765b1499a59192 100644 (file)
@@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port)
                return sun_fdc->data_82072;
        case 7: /* FD_DIR */
                return sun_read_dir();
-       };
+       }
        panic("sun_82072_fd_inb: How did I get here?");
 }
 
@@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
        case 4: /* FD_STATUS */
                sun_fdc->status_82072 = value;
                break;
-       };
+       }
        return;
 }
 
@@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port)
                return sun_fdc->data_82077;
        case 7: /* FD_DIR */
                return sun_read_dir();
-       };
+       }
        panic("sun_82077_fd_inb: How did I get here?");
 }
 
@@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
        case 3: /* FD_TDR */
                sun_fdc->tapectl_82077 = value;
                break;
-       };
+       }
        return;
 }
 
index 6597ce874d78761147e7ae206bca34c444c3ad87..bcef1f5a2a6d16945022494cb7d5fd3c1122fc44 100644 (file)
@@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port)
        case 7: /* FD_DIR */
                /* XXX: Is DCL on 0x80 in sun4m? */
                return sbus_readb(&sun_fdc->dir_82077);
-       };
+       }
        panic("sun_82072_fd_inb: How did I get here?");
 }
 
@@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
        case 4: /* FD_STATUS */
                sbus_writeb(value, &sun_fdc->status_82077);
                break;
-       };
+       }
        return;
 }
 
index 6bdaf1e43d2aef8eba06e01da9f963706d54c2d2..a4e457f003ed07a1b3a47e60d4781e8739ecbef2 100644 (file)
@@ -318,6 +318,9 @@ struct device_node;
 extern unsigned int leon_build_device_irq(unsigned int real_irq,
                                           irq_flow_handler_t flow_handler,
                                           const char *name, int do_ack);
+extern void leon_update_virq_handling(unsigned int virq,
+                             irq_flow_handler_t flow_handler,
+                             const char *name, int do_ack);
 extern void leon_clear_clock_irq(void);
 extern void leon_load_profile_irq(int cpu, unsigned int limit);
 extern void leon_init_timers(irq_handler_t counter_fn);
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
new file mode 100644 (file)
index 0000000..42b4b31
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * asm/leon_pci.h
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ */
+
+#ifndef _ASM_LEON_PCI_H_
+#define _ASM_LEON_PCI_H_
+
+/* PCI related definitions */
+struct leon_pci_info {
+       struct pci_ops *ops;
+       struct resource io_space;
+       struct resource mem_space;
+       int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
+};
+
+extern void leon_pci_init(struct platform_device *ofdev,
+                               struct leon_pci_info *info);
+
+#endif /* _ASM_LEON_PCI_H_ */
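For orientation, an editorial sketch of how this interface is meant to be consumed: a host-bridge driver fills in a struct leon_pci_info and hands it to leon_pci_init(). Everything below is hypothetical (names, window addresses and the swizzle are invented); the real consumer added by this commit is leon_pci_grpci2.c further down.

        #include <linux/init.h>
        #include <linux/platform_device.h>
        #include <linux/of_device.h>
        #include <linux/pci.h>
        #include <asm/leon_pci.h>

        /* Hypothetical host-bridge driver state, for illustration only. */
        static struct pci_ops myhost_pci_ops;     /* would carry real config-space accessors */
        static struct leon_pci_info myhost_info;

        static int myhost_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
        {
                return 1 + ((slot + pin - 1) & 3);      /* made-up INTx swizzle */
        }

        static int __devinit myhost_of_probe(struct platform_device *ofdev)
        {
                myhost_info.ops = &myhost_pci_ops;
                myhost_info.io_space = (struct resource) {
                        .name  = "myhost PCI I/O",
                        .start = 0xfd001000, .end = 0xfd00ffff,  /* invented window */
                        .flags = IORESOURCE_IO,
                };
                myhost_info.mem_space = (struct resource) {
                        .name  = "myhost PCI MEM",
                        .start = 0xc0000000, .end = 0xcfffffff,  /* invented window */
                        .flags = IORESOURCE_MEM,
                };
                myhost_info.map_irq = myhost_map_irq;

                /* Scan the bus, fix up IRQs and assign unassigned resources. */
                leon_pci_init(ofdev, &myhost_info);
                return 0;
        }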
index 332ac9ab36bc61c486e787d4cf962ae39c067d29..862e3ce92b15d45659c1ca343a3ad317c9dfb7b3 100644 (file)
@@ -47,7 +47,31 @@ extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
 #endif /* __KERNEL__ */
 
+#ifndef CONFIG_LEON_PCI
 /* generic pci stuff */
 #include <asm-generic/pci.h>
+#else
+/*
+ * On LEON, PCI memory space is mapped 1:1 with the physical address space.
+ *
+ * I/O space is located in the low 64 Kbytes of PCI I/O space. The I/O
+ * addresses are converted into CPU virtual addresses that the MMU maps to
+ * the PCI host's I/O space window, which the host controller translates to
+ * the low 64 Kbytes on the bus.
+ */
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+                       struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+                       struct pci_bus_region *region);
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+       return PCI_IRQ_NONE;
+}
+#endif
 
 #endif /* __SPARC_PCI_H */
index 7eb5d78f5211d04782430eeb8c71b8083522f22f..6676cbcc8b6a37799633e2850fbc72540e825c24 100644 (file)
@@ -29,7 +29,7 @@ struct linux_pcic {
        int                     pcic_imdim;
 };
 
-#ifdef CONFIG_PCI
+#ifdef CONFIG_PCIC_PCI
 extern int pcic_present(void);
 extern int pcic_probe(void);
 extern void pci_time_init(void);
index 47a7e862474efa94247d0192945ae23af8fcb78e..aba16092a81b68063138594cf45aa9b4e79f7a7e 100644 (file)
@@ -220,7 +220,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
-       };
+       }
        __xchg_called_with_bad_pointer();
        return x;
 }
index 3c96d3bb9f151d0112c922dedea653bc3acafe4d..10bcabce97b2d94a1487a1572756a73c4993dd18 100644 (file)
@@ -234,7 +234,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                return xchg32(ptr, x);
        case 8:
                return xchg64(ptr, x);
-       };
+       }
        __xchg_called_with_bad_pointer();
        return x;
 }
index 9cff2709a96df89576b460d1138199c79c4eb5c2..b90b4a1d070ad3e33b6b2ba6e3de59327c59677d 100644 (file)
@@ -73,7 +73,9 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 
 obj-y                     += dma.o
 
-obj-$(CONFIG_SPARC32_PCI) += pcic.o
+obj-$(CONFIG_PCIC_PCI)    += pcic.o
+obj-$(CONFIG_LEON_PCI)    += leon_pci.o
+obj-$(CONFIG_GRPCI2)      += leon_pci_grpci2.o
 
 obj-$(CONFIG_SMP)         += trampoline_$(BITS).o smp_$(BITS).o
 obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
index 1e34f29e58bb723537429dd0aacb9e0fe7df11b9..caef9deb5866f6d87a6fd3d72cb0bc4b25719080 100644 (file)
@@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
 
        default:
                return -EINVAL;
-       };
+       }
 
        return 0;
 }
index 8505e0ac78baaf27331681302961f1bb7e173154..acf5151f3c1d7b9ffe24012bbe0d0047107dec1f 100644 (file)
@@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
                break;
        default:
                panic("Can't set AUXIO register on this machine.");
-       };
+       }
        spin_unlock_irqrestore(&auxio_lock, flags);
 }
 EXPORT_SYMBOL(set_auxio);
index 668c7be5d365265b8f3ea4a645c196d528a772d6..5f450260981dd3f564db36387b8e80b817b36986 100644 (file)
@@ -664,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va
        case 0x0:
                bp->interleave = 16;
                break;
-       };
+       }
 
        /* UK[10] is reserved, and UK[11] is not set for the SDRAM
         * bank size definition.
index 8341963f4c84c10f5974edfa6f10af7db42754bd..9fe08a1ea6c6ea226f9cb4a91bf1f183067d98da 100644 (file)
@@ -229,7 +229,7 @@ real_irq_entry:
 #ifdef CONFIG_SMP
        .globl  patchme_maybe_smp_msg
 
-       cmp     %l7, 12
+       cmp     %l7, 11
 patchme_maybe_smp_msg:
        bgu     maybe_smp4m_msg
         nop
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
-       sll     %o2, 28, %o2            ! shift for simpler checks below
+       sll     %o3, 28, %o2            ! shift for simpler checks below
 maybe_smp4m_msg_check_single:
        andcc   %o2, 0x1, %g0
        beq,a   maybe_smp4m_msg_check_mask
@@ -1604,7 +1604,7 @@ restore_current:
        retl
         nop
 
-#ifdef CONFIG_PCI
+#ifdef CONFIG_PCIC_PCI
 #include <asm/pcic.h>
 
        .align  4
@@ -1650,7 +1650,7 @@ pcic_nmi_trap_patch:
         rd     %psr, %l0
        .word   0
 
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCIC_PCI */
 
        .globl  flushw_all
 flushw_all:
index 2f538ac2e139b68c5ffea31b12520c0672b83a88..d17255a2bbac142be0f3fe3b1e5e4a60d49a363e 100644 (file)
@@ -236,6 +236,21 @@ static unsigned int _leon_build_device_irq(struct platform_device *op,
        return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
 }
 
+void leon_update_virq_handling(unsigned int virq,
+                             irq_flow_handler_t flow_handler,
+                             const char *name, int do_ack)
+{
+       unsigned long mask = (unsigned long)irq_get_chip_data(virq);
+
+       mask &= ~LEON_DO_ACK_HW;
+       if (do_ack)
+               mask |= LEON_DO_ACK_HW;
+
+       irq_set_chip_and_handler_name(virq, &leon_irq,
+                                     flow_handler, name);
+       irq_set_chip_data(virq, (void *)mask);
+}
+
 void __init leon_init_timers(irq_handler_t counter_fn)
 {
        int irq, eirq;
@@ -361,6 +376,22 @@ void __init leon_init_timers(irq_handler_t counter_fn)
                prom_halt();
        }
 
+#ifdef CONFIG_SMP
+       {
+               unsigned long flags;
+
+               /*
+                * In SMP, sun4m adds an IPI handler to the IRQ trap handler
+                * that LEON must never take; sun4d and LEON overwrite the
+                * branch with a NOP.
+                */
+               local_irq_save(flags);
+               patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
+               local_flush_cache_all();
+               local_irq_restore(flags);
+       }
+#endif
+
        LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
                              LEON3_GPTIMER_EN |
                              LEON3_GPTIMER_RL |
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
new file mode 100644 (file)
index 0000000..a8a9a27
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * leon_pci.c: LEON Host PCI support
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ *
+ * Code is partially derived from pcic.c
+ */
+
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/leon.h>
+#include <asm/leon_pci.h>
+
+/* The LEON architecture does not rely on a BIOS or bootloader to set up
+ * PCI for us. The Linux generic routines are used to set up resources;
+ * reset values of configuration-space registers are preserved.
+ */
+void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
+{
+       struct pci_bus *root_bus;
+
+       root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info);
+       if (root_bus) {
+               root_bus->resource[0] = &info->io_space;
+               root_bus->resource[1] = &info->mem_space;
+               root_bus->resource[2] = NULL;
+
+               /* Init all PCI devices into PCI tree */
+               pci_bus_add_devices(root_bus);
+
+               /* Setup IRQs of all devices using custom routines */
+               pci_fixup_irqs(pci_common_swizzle, info->map_irq);
+
+               /* Assign devices with resources */
+               pci_assign_unassigned_resources();
+       }
+}
+
+/* PCI Memory and Prefetchable Memory are direct-mapped. However, I/O space is
+ * accessed through a window that is translated to the low 64 KB of PCI space;
+ * the first 4 KB is not used, so 60 KB is available.
+ *
+ * This function is used by generic code to translate resource addresses into
+ * PCI addresses.
+ */
+void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+                            struct resource *res)
+{
+       struct leon_pci_info *info = dev->bus->sysdata;
+
+       region->start = res->start;
+       region->end = res->end;
+
+       if (res->flags & IORESOURCE_IO) {
+               region->start -= (info->io_space.start - 0x1000);
+               region->end -= (info->io_space.start - 0x1000);
+       }
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+/* see pcibios_resource_to_bus() comment */
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+                            struct pci_bus_region *region)
+{
+       struct leon_pci_info *info = dev->bus->sysdata;
+
+       res->start = region->start;
+       res->end = region->end;
+
+       if (res->flags & IORESOURCE_IO) {
+               res->start += (info->io_space.start - 0x1000);
+               res->end += (info->io_space.start - 0x1000);
+       }
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
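/*
 * Worked example (editorial, with invented numbers): suppose the host driver
 * mapped its 64 KB PCI I/O window at virtual address 0xfd000000 and therefore
 * set info->io_space.start = 0xfd000000 + 0x1000 (skipping the unused first
 * 4 KB). For an I/O BAR resource with res->start = 0xfd001400:
 *
 *   region->start = 0xfd001400 - (0xfd001000 - 0x1000) = 0x1400
 *
 * so the device decodes bus I/O address 0x1400 inside the low 64 KB window,
 * and pcibios_bus_to_resource() performs the exact inverse translation.
 */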
+
+void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
+{
+       struct leon_pci_info *info = pbus->sysdata;
+       struct pci_dev *dev;
+       int i, has_io, has_mem;
+       u16 cmd;
+
+       /* Generic PCI bus probing sets these to point at
+        * &io{port,mem}_resource, which is wrong for us.
+        */
+       if (pbus->self == NULL) {
+               pbus->resource[0] = &info->io_space;
+               pbus->resource[1] = &info->mem_space;
+               pbus->resource[2] = NULL;
+       }
+
+       list_for_each_entry(dev, &pbus->devices, bus_list) {
+               /*
+                * We cannot rely on the bootloader having enabled I/O
+                * or memory access to PCI devices. Instead we enable it here
+                * if the device has BARs of the respective type.
+                */
+               has_io = has_mem = 0;
+               for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+                       unsigned long f = dev->resource[i].flags;
+                       if (f & IORESOURCE_IO)
+                               has_io = 1;
+                       else if (f & IORESOURCE_MEM)
+                               has_mem = 1;
+               }
+               /* ROM BARs are mapped into 32-bit memory space */
+               if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
+                       dev->resource[PCI_ROM_RESOURCE].flags |=
+                                                       IORESOURCE_ROM_ENABLE;
+                       has_mem = 1;
+               }
+               pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
+               if (has_io && !(cmd & PCI_COMMAND_IO)) {
+#ifdef CONFIG_PCI_DEBUG
+                       printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
+                                        pci_name(dev));
+#endif
+                       cmd |= PCI_COMMAND_IO;
+                       pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
+                                                                       cmd);
+               }
+               if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
+#ifdef CONFIG_PCI_DEBUG
+                       printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev "
+                                        "%s\n", pci_name(dev));
+#endif
+                       cmd |= PCI_COMMAND_MEMORY;
+                       pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
+                                                                       cmd);
+               }
+       }
+}
+
+/*
+ * Other archs parse arguments here.
+ */
+char * __devinit pcibios_setup(char *str)
+{
+       return str;
+}
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+                               resource_size_t size, resource_size_t align)
+{
+       return res->start;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+       return pci_enable_resources(dev, mask);
+}
+
+struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+       /*
+        * Currently the OpenBoot nodes are not connected with the PCI devices
+        * because the LEON PROM does not create PCI nodes. Eventually
+        * this will change and the same approach as pcic.c can be used to
+        * match PROM nodes with pci devices.
+        */
+       return NULL;
+}
+EXPORT_SYMBOL(pci_device_to_OF_node);
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+#ifdef CONFIG_PCI_DEBUG
+       printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq,
+               pci_name(dev));
+#endif
+       pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/* in/out routines taken from pcic.c
+ *
+ * This probably belongs here rather than ioport.c because
+ * we do not want this crud linked into SBus kernels.
+ * Also, think for a moment about the likes of floppy.c that
+ * include architecture-specific parts. They may want to redefine ins/outs.
+ *
+ * We do not use horrible macros here because we want to
+ * advance pointer by sizeof(size).
+ */
+void outsb(unsigned long addr, const void *src, unsigned long count)
+{
+       while (count) {
+               count -= 1;
+               outb(*(const char *)src, addr);
+               src += 1;
+               /* addr += 1; */
+       }
+}
+EXPORT_SYMBOL(outsb);
+
+void outsw(unsigned long addr, const void *src, unsigned long count)
+{
+       while (count) {
+               count -= 2;
+               outw(*(const short *)src, addr);
+               src += 2;
+               /* addr += 2; */
+       }
+}
+EXPORT_SYMBOL(outsw);
+
+void outsl(unsigned long addr, const void *src, unsigned long count)
+{
+       while (count) {
+               count -= 4;
+               outl(*(const long *)src, addr);
+               src += 4;
+               /* addr += 4; */
+       }
+}
+EXPORT_SYMBOL(outsl);
+
+void insb(unsigned long addr, void *dst, unsigned long count)
+{
+       while (count) {
+               count -= 1;
+               *(unsigned char *)dst = inb(addr);
+               dst += 1;
+               /* addr += 1; */
+       }
+}
+EXPORT_SYMBOL(insb);
+
+void insw(unsigned long addr, void *dst, unsigned long count)
+{
+       while (count) {
+               count -= 2;
+               *(unsigned short *)dst = inw(addr);
+               dst += 2;
+               /* addr += 2; */
+       }
+}
+EXPORT_SYMBOL(insw);
+
+void insl(unsigned long addr, void *dst, unsigned long count)
+{
+       while (count) {
+               count -= 4;
+               /*
+                * XXX I am sure we are in for an unaligned trap here.
+                */
+               *(unsigned long *)dst = inl(addr);
+               dst += 4;
+               /* addr += 4; */
+       }
+}
+EXPORT_SYMBOL(insl);
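A detail worth noting about the string I/O helpers above: they advance the buffer pointer by the access size but deliberately leave the port address fixed (the commented-out addr increments), matching the usual insb()/outsb() contract of repeatedly hitting a single data port. A minimal hypothetical use, assuming some PCI device pdev with an I/O BAR (both invented for illustration), could look like:

        /* Drain a 32-byte FIFO behind a fixed I/O data port of a hypothetical pdev. */
        u8 id[32];
        unsigned long port = pci_resource_start(pdev, 1);  /* I/O BAR, invented */
        insb(port, id, sizeof(id));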
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
new file mode 100644 (file)
index 0000000..44dc093
--- /dev/null
@@ -0,0 +1,897 @@
+/*
+ * leon_pci_grpci2.c: GRPCI2 Host PCI driver
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ *
+ */
+
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/leon.h>
+#include <asm/vaddrs.h>
+#include <asm/sections.h>
+#include <asm/leon_pci.h>
+
+#include "irq.h"
+
+struct grpci2_barcfg {
+       unsigned long pciadr;   /* PCI Space Address */
+       unsigned long ahbadr;   /* PCI Base address mapped to this AHB addr */
+};
+
+/* Device Node Configuration options:
+ *  - barcfg     : Custom configuration of the host's 6 target BARs
+ *  - irq_mask   : Limit which PCI interrupts are enabled
+ *  - reset      : Force PCI reset on startup
+ *
+ * barcfg
+ * ======
+ *
+ * Optional custom target BAR configuration (see struct grpci2_barcfg). All
+ * addresses are physical. The array always contains 6 entries (len = 2*4*6 bytes).
+ *
+ * -1 means not configured (let host driver do default setup).
+ *
+ * [i*2+0] = PCI Address of BAR[i] on target interface
+ * [i*2+1] = Accessing the PCI address of BAR[i] results in this AMBA address
+ *
+ *
+ * irq_mask
+ * ========
+ *
+ * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
+ * all are enabled. Use this when PCI interrupt pins are floating on PCB.
+ * int, len=4.
+ *  bit0 = PCI INTA#
+ *  bit1 = PCI INTB#
+ *  bit2 = PCI INTC#
+ *  bit3 = PCI INTD#
+ *
+ *
+ * reset
+ * =====
+ *
+ * Force PCI reset on startup. int, len=4
+ */
+
+/* Enable Debugging Configuration Space Access */
+#undef GRPCI2_DEBUG_CFGACCESS
+
+/*
+ * GRPCI2 APB Register MAP
+ */
+struct grpci2_regs {
+       unsigned int ctrl;              /* 0x00 Control */
+       unsigned int sts_cap;           /* 0x04 Status / Capabilities */
+       int res1;                       /* 0x08 */
+       unsigned int io_map;            /* 0x0C I/O Map address */
+       unsigned int dma_ctrl;          /* 0x10 DMA */
+       unsigned int dma_bdbase;        /* 0x14 DMA */
+       int res2[2];                    /* 0x18 */
+       unsigned int bars[6];           /* 0x20 read-only PCI BARs */
+       int res3[2];                    /* 0x38 */
+       unsigned int ahbmst_map[16];    /* 0x40 AHB->PCI Map per AHB Master */
+
+       /* PCI Trace Buffer Registers (OPTIONAL) */
+       unsigned int t_ctrl;            /* 0x80 */
+       unsigned int t_cnt;             /* 0x84 */
+       unsigned int t_adpat;           /* 0x88 */
+       unsigned int t_admask;          /* 0x8C */
+       unsigned int t_sigpat;          /* 0x90 */
+       unsigned int t_sigmask;         /* 0x94 */
+       unsigned int t_adstate;         /* 0x98 */
+       unsigned int t_sigstate;        /* 0x9C */
+};
+
+#define REGLOAD(a)     (be32_to_cpu(__raw_readl(&(a))))
+#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
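/*
 * Note on the accessors above (editorial): the GRPCI2 APB registers are
 * presumably big-endian like the LEON core, so pairing the non-swapping
 * __raw_readl()/__raw_writel() with be32_to_cpu()/cpu_to_be32() (identity
 * operations on this big-endian CPU) amounts to a plain volatile 32-bit
 * access while keeping the intended register byte order explicit, e.g.:
 *
 *   REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI);   set one control bit
 */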
+
+#define CTRL_BUS_BIT 16
+
+#define CTRL_RESET (1<<31)
+#define CTRL_SI (1<<27)
+#define CTRL_PE (1<<26)
+#define CTRL_EI (1<<25)
+#define CTRL_ER (1<<24)
+#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
+#define CTRL_HOSTINT 0xf
+
+#define STS_HOST_BIT   31
+#define STS_MST_BIT    30
+#define STS_TAR_BIT    29
+#define STS_DMA_BIT    28
+#define STS_DI_BIT     27
+#define STS_HI_BIT     26
+#define STS_IRQMODE_BIT        24
+#define STS_TRACE_BIT  23
+#define STS_CFGERRVALID_BIT 20
+#define STS_CFGERR_BIT 19
+#define STS_INTTYPE_BIT        12
+#define STS_INTSTS_BIT 8
+#define STS_FDEPTH_BIT 2
+#define STS_FNUM_BIT   0
+
+#define STS_HOST       (1<<STS_HOST_BIT)
+#define STS_MST                (1<<STS_MST_BIT)
+#define STS_TAR                (1<<STS_TAR_BIT)
+#define STS_DMA                (1<<STS_DMA_BIT)
+#define STS_DI         (1<<STS_DI_BIT)
+#define STS_HI         (1<<STS_HI_BIT)
+#define STS_IRQMODE    (0x3<<STS_IRQMODE_BIT)
+#define STS_TRACE      (1<<STS_TRACE_BIT)
+#define STS_CFGERRVALID        (1<<STS_CFGERRVALID_BIT)
+#define STS_CFGERR     (1<<STS_CFGERR_BIT)
+#define STS_INTTYPE    (0x3f<<STS_INTTYPE_BIT)
+#define STS_INTSTS     (0xf<<STS_INTSTS_BIT)
+#define STS_FDEPTH     (0x7<<STS_FDEPTH_BIT)
+#define STS_FNUM       (0x3<<STS_FNUM_BIT)
+
+#define STS_ISYSERR    (1<<17)
+#define STS_IDMA       (1<<16)
+#define STS_IDMAERR    (1<<15)
+#define STS_IMSTABRT   (1<<14)
+#define STS_ITGTABRT   (1<<13)
+#define STS_IPARERR    (1<<12)
+
+#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
+
+struct grpci2_bd_chan {
+       unsigned int ctrl;      /* 0x00 DMA Control */
+       unsigned int nchan;     /* 0x04 Next DMA Channel Address */
+       unsigned int nbd;       /* 0x08 Next Data Descriptor in chan */
+       unsigned int res;       /* 0x0C Reserved */
+};
+
+#define BD_CHAN_EN             0x80000000
+#define BD_CHAN_TYPE           0x00300000
+#define BD_CHAN_BDCNT          0x0000ffff
+#define BD_CHAN_EN_BIT         31
+#define BD_CHAN_TYPE_BIT       20
+#define BD_CHAN_BDCNT_BIT      0
+
+struct grpci2_bd_data {
+       unsigned int ctrl;      /* 0x00 DMA Data Control */
+       unsigned int pci_adr;   /* 0x04 PCI Start Address */
+       unsigned int ahb_adr;   /* 0x08 AHB Start address */
+       unsigned int next;      /* 0x0C Next Data Descriptor in chan */
+};
+
+#define BD_DATA_EN             0x80000000
+#define BD_DATA_IE             0x40000000
+#define BD_DATA_DR             0x20000000
+#define BD_DATA_TYPE           0x00300000
+#define BD_DATA_ER             0x00080000
+#define BD_DATA_LEN            0x0000ffff
+#define BD_DATA_EN_BIT         31
+#define BD_DATA_IE_BIT         30
+#define BD_DATA_DR_BIT         29
+#define BD_DATA_TYPE_BIT       20
+#define BD_DATA_ER_BIT         19
+#define BD_DATA_LEN_BIT                0
+
+/* GRPCI2 Capability */
+struct grpci2_cap_first {
+       unsigned int ctrl;
+       unsigned int pci2ahb_map[6];
+       unsigned int ext2ahb_map;
+       unsigned int io_map;
+       unsigned int pcibar_size[6];
+};
+#define CAP9_CTRL_OFS 0
+#define CAP9_BAR_OFS 0x4
+#define CAP9_IOMAP_OFS 0x20
+#define CAP9_BARSIZE_OFS 0x24
+
+struct grpci2_priv {
+       struct leon_pci_info    info; /* must be the first member of this structure */
+       struct grpci2_regs      *regs;
+       char                    irq;
+       char                    irq_mode; /* IRQ Mode from CAPSTS REG */
+       char                    bt_enabled;
+       char                    do_reset;
+       char                    irq_mask;
+       u32                     pciid; /* PCI ID of Host */
+       unsigned char           irq_map[4];
+
+       /* Virtual IRQ numbers */
+       unsigned int            virq_err;
+       unsigned int            virq_dma;
+
+       /* AHB PCI Windows */
+       unsigned long           pci_area;       /* MEMORY */
+       unsigned long           pci_area_end;
+       unsigned long           pci_io;         /* I/O */
+       unsigned long           pci_conf;       /* CONFIGURATION */
+       unsigned long           pci_conf_end;
+       unsigned long           pci_io_va;
+
+       struct grpci2_barcfg    tgtbars[6];
+};
+
+DEFINE_SPINLOCK(grpci2_dev_lock);
+struct grpci2_priv *grpci2priv;
+
+int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+       struct grpci2_priv *priv = dev->bus->sysdata;
+       int irq_group;
+
+       /* Use default IRQ decoding on PCI BUS0 according to slot numbering */
+       irq_group = slot & 0x3;
+       pin = ((pin - 1) + irq_group) & 0x3;
+
+       return priv->irq_map[pin];
+}
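/*
 * Worked example (editorial): a device in slot 2 asserting INTB# (pin == 2)
 * gives irq_group = 2 & 0x3 = 2 and pin = ((2 - 1) + 2) & 0x3 = 3, so the
 * request is routed to priv->irq_map[3], the entry associated with PCI INTD#.
 * This is the usual per-slot rotation of the INTA#..INTD# lines.
 */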
+
+static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 *val)
+{
+       unsigned int *pci_conf;
+       unsigned long flags;
+       u32 tmp;
+
+       if (where & 0x3)
+               return -EINVAL;
+
+       if (bus == 0 && PCI_SLOT(devfn) != 0)
+               devfn += (0x8 * 6);
+
+       /* Select bus */
+       spin_lock_irqsave(&grpci2_dev_lock, flags);
+       REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
+                                  (bus << 16));
+       spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+
+       /* clear old status */
+       REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
+
+       pci_conf = (unsigned int *) (priv->pci_conf |
+                                               (devfn << 8) | (where & 0xfc));
+       tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
+
+       /* Wait until GRPCI2 signals that the CFG access is done; it should
+        * be done instantaneously unless a DMA operation is ongoing...
+        */
+       while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
+               ;
+
+       if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
+               *val = 0xffffffff;
+       } else {
+               /* Bus always little endian (unaffected by byte-swapping) */
+               *val = flip_dword(tmp);
+       }
+
+       return 0;
+}
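/*
 * Worked example (editorial): a 32-bit config read of offset 0x10 (BAR0) on
 * bus 0, slot 1, function 0 starts with devfn = 0x08; because bus == 0 and
 * the slot is not the host itself (slot 0), devfn += 0x8 * 6 yields 0x38
 * (device 7 on the bus). The load then hits
 *
 *   priv->pci_conf | (0x38 << 8) | (0x10 & 0xfc)  ==  priv->pci_conf + 0x3810
 *
 * inside the 64 KB configuration window, and flip_dword() converts the
 * little-endian bus value for the big-endian CPU.
 */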
+
+static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 *val)
+{
+       u32 v;
+       int ret;
+
+       if (where & 0x1)
+               return -EINVAL;
+       ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+       *val = 0xffff & (v >> (8 * (where & 0x3)));
+       return ret;
+}
+
+static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 *val)
+{
+       u32 v;
+       int ret;
+
+       ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+       *val = 0xff & (v >> (8 * (where & 3)));
+
+       return ret;
+}
+
+static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 val)
+{
+       unsigned int *pci_conf;
+       unsigned long flags;
+
+       if (where & 0x3)
+               return -EINVAL;
+
+       if (bus == 0 && PCI_SLOT(devfn) != 0)
+               devfn += (0x8 * 6);
+
+       /* Select bus */
+       spin_lock_irqsave(&grpci2_dev_lock, flags);
+       REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
+                                  (bus << 16));
+       spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+
+       /* clear old status */
+       REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
+
+       pci_conf = (unsigned int *) (priv->pci_conf |
+                                               (devfn << 8) | (where & 0xfc));
+       LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+
+       /* Wait until GRPCI2 signals that the CFG access is done; it should
+        * be done instantaneously unless a DMA operation is ongoing...
+        */
+       while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
+               ;
+
+       return 0;
+}
+
+static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 val)
+{
+       int ret;
+       u32 v;
+
+       if (where & 0x1)
+               return -EINVAL;
+       ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
+       if (ret)
+               return ret;
+       v = (v & ~(0xffff << (8 * (where & 0x3)))) |
+           ((0xffff & val) << (8 * (where & 0x3)));
+       return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
+}
+
+static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
+                               unsigned int devfn, int where, u32 val)
+{
+       int ret;
+       u32 v;
+
+       ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
+       if (ret != 0)
+               return ret;
+       v = (v & ~(0xff << (8 * (where & 0x3)))) |
+           ((0xff & val) << (8 * (where & 0x3)));
+       return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
+}
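/*
 * Worked example (editorial): writing 0x40 to the byte at offset 0x0d
 * (PCI_LATENCY_TIMER) reads the aligned dword at 0x0c, then merges the new
 * byte with shift = 8 * (0x0d & 0x3) = 8:
 *
 *   v = (v & ~(0xff << 8)) | (0x40 << 8);
 *
 * and writes the whole dword back through grpci2_cfg_w32().
 */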
+
+/* Read from Configuration Space. When entering here the PCI layer has taken
+ * the pci_lock spinlock and IRQ is off.
+ */
+static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
+                             int where, int size, u32 *val)
+{
+       struct grpci2_priv *priv = grpci2priv;
+       unsigned int busno = bus->number;
+       int ret;
+
+       if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
+               *val = ~0;
+               return 0;
+       }
+
+       switch (size) {
+       case 1:
+               ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
+               break;
+       case 2:
+               ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
+               break;
+       case 4:
+               ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+#ifdef GRPCI2_DEBUG_CFGACCESS
+       printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
+               "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+               *val, size);
+#endif
+
+       return ret;
+}
+
+/* Write to Configuration Space. When entering here the PCI layer has taken
+ * the pci_lock spinlock and IRQ is off.
+ */
+static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
+                              int where, int size, u32 val)
+{
+       struct grpci2_priv *priv = grpci2priv;
+       unsigned int busno = bus->number;
+
+       if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
+               return 0;
+
+#ifdef GRPCI2_DEBUG_CFGACCESS
+       printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
+               "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
+               where, size, val);
+#endif
+
+       switch (size) {
+       default:
+               return -EINVAL;
+       case 1:
+               return grpci2_cfg_w8(priv, busno, devfn, where, val);
+       case 2:
+               return grpci2_cfg_w16(priv, busno, devfn, where, val);
+       case 4:
+               return grpci2_cfg_w32(priv, busno, devfn, where, val);
+       }
+}
+
+static struct pci_ops grpci2_ops = {
+       .read =         grpci2_read_config,
+       .write =        grpci2_write_config,
+};
+
+/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
+ * 3, where all PCI interrupts have a separate IRQ on the system IRQ
+ * controller, this is not needed and the standard IRQ controller can be used.
+ */
+
+static void grpci2_mask_irq(struct irq_data *data)
+{
+       unsigned long flags;
+       unsigned int irqidx;
+       struct grpci2_priv *priv = grpci2priv;
+
+       irqidx = (unsigned int)data->chip_data - 1;
+       if (irqidx > 3) /* only mask PCI interrupts here */
+               return;
+
+       spin_lock_irqsave(&grpci2_dev_lock, flags);
+       REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
+       spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+}
+
+static void grpci2_unmask_irq(struct irq_data *data)
+{
+       unsigned long flags;
+       unsigned int irqidx;
+       struct grpci2_priv *priv = grpci2priv;
+
+       irqidx = (unsigned int)data->chip_data - 1;
+       if (irqidx > 3) /* only unmask PCI interrupts here */
+               return;
+
+       spin_lock_irqsave(&grpci2_dev_lock, flags);
+       REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
+       spin_unlock_irqrestore(&grpci2_dev_lock, flags);
+}
+
+static unsigned int grpci2_startup_irq(struct irq_data *data)
+{
+       grpci2_unmask_irq(data);
+       return 0;
+}
+
+static void grpci2_shutdown_irq(struct irq_data *data)
+{
+       grpci2_mask_irq(data);
+}
+
+static struct irq_chip grpci2_irq = {
+       .name           = "grpci2",
+       .irq_startup    = grpci2_startup_irq,
+       .irq_shutdown   = grpci2_shutdown_irq,
+       .irq_mask       = grpci2_mask_irq,
+       .irq_unmask     = grpci2_unmask_irq,
+};
+
+/* Handle one or multiple IRQs from the PCI core */
+static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
+{
+       struct grpci2_priv *priv = grpci2priv;
+       int i, ack = 0;
+       unsigned int ctrl, sts_cap, pci_ints;
+
+       ctrl = REGLOAD(priv->regs->ctrl);
+       sts_cap = REGLOAD(priv->regs->sts_cap);
+
+       /* Error Interrupt? */
+       if (sts_cap & STS_ERR_IRQ) {
+               generic_handle_irq(priv->virq_err);
+               ack = 1;
+       }
+
+       /* PCI Interrupt? */
+       pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
+       if (pci_ints) {
+               /* Call respective PCI Interrupt handler */
+               for (i = 0; i < 4; i++) {
+                       if (pci_ints & (1 << i))
+                               generic_handle_irq(priv->irq_map[i]);
+               }
+               ack = 1;
+       }
+
+       /*
+        * Decode the DMA interrupt only when it is shared with Err and PCI
+        * INTX#. When the DMA has a unique IRQ, the DMA interrupts don't end
+        * up here; they go directly to the DMA ISR.
+        */
+       if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
+               generic_handle_irq(priv->virq_dma);
+               ack = 1;
+       }
+
+       /*
+        * Call the "first level" IRQ chip end-of-irq handler. It will ACK the
+        * LEON IRQ controller; this must be done after all IRQ sources have
+        * been handled to avoid double IRQ generation.
+        */
+       if (ack)
+               desc->irq_data.chip->irq_eoi(&desc->irq_data);
+}
+
+/* Create a virtual IRQ */
+static unsigned int grpci2_build_device_irq(unsigned int irq)
+{
+       unsigned int virq = 0, pil;
+
+       pil = 1 << 8;
+       virq = irq_alloc(irq, pil);
+       if (virq == 0)
+               goto out;
+
+       irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
+                                     "pcilvl");
+       irq_set_chip_data(virq, (void *)irq);
+
+out:
+       return virq;
+}
+
+void grpci2_hw_init(struct grpci2_priv *priv)
+{
+       u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
+       struct grpci2_regs *regs = priv->regs;
+       int i;
+       struct grpci2_barcfg *barcfg = priv->tgtbars;
+
+       /* Reset any earlier setup */
+       if (priv->do_reset) {
+               printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
+               REGSTORE(regs->ctrl, CTRL_RESET);
+               ssleep(1); /* Wait for boards to settle */
+       }
+       REGSTORE(regs->ctrl, 0);
+       REGSTORE(regs->sts_cap, ~0); /* Clear Status */
+       REGSTORE(regs->dma_ctrl, 0);
+       REGSTORE(regs->dma_bdbase, 0);
+
+       /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
+       REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
+
+       /* Set a 1:1 mapping from AHB to PCI memory space for all masters.
+        * Each AHB master has its own mapping register. Max 16 AHB masters.
+        */
+       for (i = 0; i < 16; i++)
+               REGSTORE(regs->ahbmst_map[i], priv->pci_area);
+
+       /* Get the GRPCI2 Host PCI ID */
+       grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
+
+       /* Get address to first (always defined) capability structure */
+       grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
+
+       /* Enable/Disable Byte twisting */
+       grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
+       io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
+       grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
+
+       /* Set up the host's PCI target BARs so that other peripherals can
+        * access, and DMA to, the host's memory. The target BARs can be sized
+        * and enabled individually.
+        *
+        * The user may set custom target BARs, but the default is:
+        * the first BAR maps low kernel main memory (DMA is part of the normal
+        * region on sparc, which is SRMMU_MAXMEM big) 1:1 to the PCI bus; the
+        * other BARs are disabled. We assume that the first BAR is always
+        * available.
+        */
+       for (i = 0; i < 6; i++) {
+               if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
+                       /* Target BARs must have the proper alignment */
+                       ahbadr = barcfg[i].ahbadr;
+                       pciadr = barcfg[i].pciadr;
+                       bar_sz = ((pciadr - 1) & ~pciadr) + 1;
+               } else {
+                       if (i == 0) {
+                               /* Map main memory */
+                               bar_sz = 0xf0000008; /* 256MB prefetchable */
+                               ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
+                                       (unsigned long) &_end));
+                               pciadr = ahbadr;
+                       } else {
+                               bar_sz = 0;
+                               ahbadr = 0;
+                               pciadr = 0;
+                       }
+               }
+               grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
+               grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
+               grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+               printk(KERN_INFO "        TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
+                       i, pciadr, ahbadr);
+       }
+
+       /* set as bus master and enable pci memory responses */
+       grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
+       data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+       grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
+
+       /* Enable Error response (CPU-TRAP) on illegal memory access. */
+       REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
+}
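/*
 * Worked example (editorial) for the target-BAR sizing above: a custom barcfg
 * entry with pciadr = 0xa0000000 gives
 *
 *   bar_sz = ((0xa0000000 - 1) & ~0xa0000000) + 1 = 0x20000000
 *
 * i.e. a 512 MB window. The expression picks the lowest set bit of pciadr,
 * which equals its natural alignment; hence the requirement that target BAR
 * addresses be aligned to the intended size.
 */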
+
+static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
+{
+       printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
+       return IRQ_NONE;
+}
+
+/* Handle GRPCI2 Error Interrupt */
+static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
+{
+       struct grpci2_priv *priv = arg;
+       struct grpci2_regs *regs = priv->regs;
+       unsigned int status;
+
+       status = REGLOAD(regs->sts_cap);
+       if ((status & STS_ERR_IRQ) == 0)
+               return IRQ_NONE;
+
+       if (status & STS_IPARERR)
+               printk(KERN_ERR "GRPCI2: Parity Error\n");
+
+       if (status & STS_ITGTABRT)
+               printk(KERN_ERR "GRPCI2: Target Abort\n");
+
+       if (status & STS_IMSTABRT)
+               printk(KERN_ERR "GRPCI2: Master Abort\n");
+
+       if (status & STS_ISYSERR)
+               printk(KERN_ERR "GRPCI2: System Error\n");
+
+       /* Clear handled INT TYPE IRQs */
+       REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit grpci2_of_probe(struct platform_device *ofdev)
+{
+       struct grpci2_regs *regs;
+       struct grpci2_priv *priv;
+       int err, i, len;
+       const int *tmp;
+       unsigned int capability;
+
+       if (grpci2priv) {
+               printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
+               return -ENODEV;
+       }
+
+       if (ofdev->num_resources < 3) {
+               printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
+               return -EIO;
+       }
+
+       /* Find Device Address */
+       regs = of_ioremap(&ofdev->resource[0], 0,
+                         resource_size(&ofdev->resource[0]),
+                         "grlib-grpci2 regs");
+       if (regs == NULL) {
+               printk(KERN_ERR "GRPCI2: ioremap failed\n");
+               return -EIO;
+       }
+
+       /*
+        * Check that we're in Host Slot and that we can act as a Host Bridge
+        * and not only as a target.
+        */
+       capability = REGLOAD(regs->sts_cap);
+       if ((capability & STS_HOST) || !(capability & STS_MST)) {
+               printk(KERN_INFO "GRPCI2: not in host system slot\n");
+               err = -EIO;
+               goto err1;
+       }
+
+       priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
+       if (grpci2priv == NULL) {
+               err = -ENOMEM;
+               goto err1;
+       }
+       memset(grpci2priv, 0, sizeof(*grpci2priv));
+       priv->regs = regs;
+       priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
+       priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
+
+       printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
+
+       /* Byte twisting should be made configurable from kernel command line */
+       priv->bt_enabled = 1;
+
+       /* Let user do custom Target BAR assignment */
+       tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
+       if (tmp && (len == 2*4*6))
+               memcpy(priv->tgtbars, tmp, 2*4*6);
+       else
+               memset(priv->tgtbars, -1, 2*4*6);
+
+       /* Limit IRQ unmasking in irq_mode 2 and 3 */
+       tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
+       if (tmp && (len == 4))
+               priv->irq_mask = *tmp;
+       else
+               priv->irq_mask = 0xf;
+
+       /* Optional PCI reset. Force PCI reset on startup */
+       tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
+       if (tmp && (len == 4))
+               priv->do_reset = *tmp;
+       else
+               priv->do_reset = 0;
+
+       /* Find PCI Memory, I/O and Configuration Space Windows */
+       priv->pci_area = ofdev->resource[1].start;
+       priv->pci_area_end = ofdev->resource[1].end+1;
+       priv->pci_io = ofdev->resource[2].start;
+       priv->pci_conf = ofdev->resource[2].start + 0x10000;
+       priv->pci_conf_end = priv->pci_conf + 0x10000;
+       priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
+       if (!priv->pci_io_va) {
+               err = -EIO;
+               goto err2;
+       }
+
+       printk(KERN_INFO
+               "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
+               "        I/O    SPACE [0x%08lx - 0x%08lx]\n"
+               "        CONFIG SPACE [0x%08lx - 0x%08lx]\n",
+               priv->pci_area, priv->pci_area_end-1,
+               priv->pci_io, priv->pci_conf-1,
+               priv->pci_conf, priv->pci_conf_end-1);
+
+       /*
+        * I/O space resources are in the I/O window mapped into the virtual
+        * address space. We never use the low 4 KB because some devices seem
+        * to have problems using address 0.
+        */
+       memset(&priv->info.io_space, 0, sizeof(struct resource));
+       priv->info.io_space.name = "GRPCI2 PCI I/O Space";
+       priv->info.io_space.start = priv->pci_io_va + 0x1000;
+       priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
+       priv->info.io_space.flags = IORESOURCE_IO;
+
+       /*
+        * GRPCI2 has no prefetchable memory; map everything as
+        * non-prefetchable memory.
+        */
+       memset(&priv->info.mem_space, 0, sizeof(struct resource));
+       priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
+       priv->info.mem_space.start = priv->pci_area;
+       priv->info.mem_space.end = priv->pci_area_end - 1;
+       priv->info.mem_space.flags = IORESOURCE_MEM;
+
+       if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
+               goto err3;
+       if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
+               goto err4;
+
+       grpci2_hw_init(priv);
+
+       /*
+        * Get the PCI interrupt to system IRQ mapping and set up IRQ
+        * handling. The error IRQ is always on PCI INTA.
+        */
+       if (priv->irq_mode < 2) {
+               /* All PCI interrupts are shared using the same system IRQ */
+               leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
+                                        "pcilvl", 0);
+
+               priv->irq_map[0] = grpci2_build_device_irq(1);
+               priv->irq_map[1] = grpci2_build_device_irq(2);
+               priv->irq_map[2] = grpci2_build_device_irq(3);
+               priv->irq_map[3] = grpci2_build_device_irq(4);
+
+               priv->virq_err = grpci2_build_device_irq(5);
+               if (priv->irq_mode & 1)
+                       priv->virq_dma = ofdev->archdata.irqs[1];
+               else
+                       priv->virq_dma = grpci2_build_device_irq(6);
+
+               /* Enable IRQs on LEON IRQ controller */
+               err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
+                                       "GRPCI2_JUMP", priv);
+               if (err)
+                       printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
+       } else {
+               /* Each PCI interrupt has a unique system IRQ */
+               for (i = 0; i < 4; i++) {
+                       /* Make LEON IRQ layer handle level IRQ by acking */
+                       leon_update_virq_handling(ofdev->archdata.irqs[i],
+                                                handle_fasteoi_irq, "pcilvl",
+                                                1);
+                       priv->irq_map[i] = ofdev->archdata.irqs[i];
+               }
+               priv->virq_err = priv->irq_map[0];
+               if (priv->irq_mode & 1)
+                       priv->virq_dma = ofdev->archdata.irqs[4];
+               else
+                       priv->virq_dma = priv->irq_map[0];
+
+               /* Unmask all PCI interrupts, request_irq will not do that */
+               REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
+       }
+
+       /* Setup IRQ handler for non-configuration space access errors */
+       err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
+                               "GRPCI2_ERR", priv);
+       if (err) {
+               printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
+               goto err5;
+       }
+
+       /*
+        * Enable Error Interrupts. PCI interrupts are unmasked once request_irq
+        * is called by the PCI Device drivers
+        */
+       REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
+
+       /* Init common layer and scan buses */
+       priv->info.ops = &grpci2_ops;
+       priv->info.map_irq = grpci2_map_irq;
+       leon_pci_init(ofdev, &priv->info);
+
+       return 0;
+
+err5:
+       release_resource(&priv->info.io_space);
+err4:
+       release_resource(&priv->info.mem_space);
+err3:
+       err = -ENOMEM;
+       iounmap((void *)priv->pci_io_va);
+err2:
+       kfree(priv);
+err1:
+       of_iounmap(&ofdev->resource[0], regs,
+               resource_size(&ofdev->resource[0]));
+       return err;
+}
+
+static struct of_device_id grpci2_of_match[] = {
+       {
+        .name = "GAISLER_GRPCI2",
+        },
+       {
+        .name = "01_07c",
+        },
+       {},
+};
+
+static struct platform_driver grpci2_of_driver = {
+       .driver = {
+               .name = "grpci2",
+               .owner = THIS_MODULE,
+               .of_match_table = grpci2_of_match,
+       },
+       .probe = grpci2_of_probe,
+};
+
+static int __init grpci2_init(void)
+{
+       return platform_driver_register(&grpci2_of_driver);
+}
+
+subsys_initcall(grpci2_init);
index 8d348c474a2f3b7ffde344246c0e41856f3988fe..99ba5baa9497da77a773a2fc9da7600b2ee16816 100644 (file)
@@ -214,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                               me->name,
                               (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
                        return -ENOEXEC;
-               };
+               }
        }
        return 0;
 }
index 6e3874b64488056ba060e21a57b5796863cd66b3..a6895987fb70254b7db258c2eff6fd65262aba46 100644 (file)
@@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
        case 4:
                *value = ret & 0xffffffff;
                break;
-       };
+       }
 
 
        return PCIBIOS_SUCCESSFUL;
@@ -456,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
 
                default:
                        break;
-               };
+               }
        }
 
        if (!saw_io || !saw_mem) {
index 283fbc329a4397c9d8ff9b159611fe1bf62d4e8b..f030b02eddddd18f04db1c268735c37f040ca0e2 100644 (file)
@@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
                default:
                        type_string = "ECC Error";
                        break;
-               };
+               }
                printk("%s: IOMMU Error, type[%s]\n",
                       pbm->name, type_string);
 
@@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
                        default:
                                type_string = "ECC Error";
                                break;
-                       };
+                       }
                        printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
                               "sz(%dK) vpg(%08lx)]\n",
                               pbm->name, i, type_string,
@@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
        default:
                chipset_name = "SCHIZO";
                break;
-       };
+       }
 
        /* For SCHIZO, three OBP regs:
         * 1) PBM controller regs
index 570b98f6e89794b9ae21ab5bf2fafc9209c03f34..40e4936bd47995486039d256463cc514f23d8350 100644 (file)
@@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
-               };
+               }
 
                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
index fe2af66bb1988ee3eff896223b5f131e06ca06e6..8db48e808ed42a969e45dec6c92d1c92b05930df 100644 (file)
@@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm,
                default:
                        type_str = "ECC Error";
                        break;
-               };
+               }
                printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
                       pbm->name, type_str);
 
index 2ca32d13abcfdbbfbe0a81de61e706d292248b0f..a161b9c77f055617553fb32a2e25fdac279922f0 100644 (file)
@@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
 
        default:
                return;
-       };
+       }
 
        val = upa_readq(cfg_reg);
        if (val & (1UL << 14UL)) {
@@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
-               };
+               }
 
                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
index 3249d3f3234d5535fa9381e47702eeeba809a10c..d26e1f6c717aa1f5fd3dffe2689594e1bce3b29a 100644 (file)
@@ -267,7 +267,7 @@ void __init setup_arch(char **cmdline_p)
        default:
                printk("UNKNOWN!\n");
                break;
-       };
+       }
 
 #ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
index f3b6850cc8db4e4e59e5b975fcd6184af2c2d70c..c4dd0999da86b633568f278cada4420593b55940 100644 (file)
@@ -209,7 +209,7 @@ void __init per_cpu_patch(void)
                default:
                        prom_printf("Unknown cpu type, halting.\n");
                        prom_halt();
-               };
+               }
 
                *(unsigned int *) (addr +  0) = insns[0];
                wmb();
index d5b3958be0b40ac25fdcb7d209d10d950c7df80e..21b125341bf79a5dbe8282d22751c4f805ab5d11 100644 (file)
@@ -114,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
                printk("UNKNOWN!\n");
                BUG();
                break;
-       };
+       }
 }
 
 void cpu_panic(void)
@@ -374,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                printk("UNKNOWN!\n");
                BUG();
                break;
-       };
+       }
 }
 
 /* Set this up early so that things like the scheduler can init
@@ -447,7 +447,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
                printk("UNKNOWN!\n");
                BUG();
                break;
-       };
+       }
 
        if (!ret) {
                cpumask_set_cpu(cpu, &smp_commenced_mask);
index a9ea60eb2c10f21e01cf007143e47942b6a768ad..1d13c5bda0b15c3042ce69e406311a449e93e4df 100644 (file)
@@ -103,10 +103,9 @@ static void sun4d_sbus_handler_irq(int sbusl)
 
        sbil = (sbusl << 2);
        /* Loop for each pending SBI */
-       for (sbino = 0; bus_mask; sbino++) {
+       for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
                unsigned int idx, mask;
 
-               bus_mask >>= 1;
                if (!(bus_mask & 1))
                        continue;
                /* XXX This seems to ACK the irq twice.  acquire_sbi()
@@ -118,19 +117,16 @@ static void sun4d_sbus_handler_irq(int sbusl)
                mask &= (0xf << sbil);
 
                /* Loop for each pending SBI slot */
-               idx = 0;
                slot = (1 << sbil);
-               while (mask != 0) {
+               for (idx = 0; mask != 0; idx++, slot <<= 1) {
                        unsigned int pil;
                        struct irq_bucket *p;
 
-                       idx++;
-                       slot <<= 1;
                        if (!(mask & slot))
                                continue;
 
                        mask &= ~slot;
-                       pil = sun4d_encode_irq(sbino, sbil, idx);
+                       pil = sun4d_encode_irq(sbino, sbusl, idx);
 
                        p = irq_map[pil];
                        while (p) {
@@ -218,10 +214,10 @@ static void sun4d_unmask_irq(struct irq_data *data)
 
 #ifdef CONFIG_SMP
        spin_lock_irqsave(&sun4d_imsk_lock, flags);
-       cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq));
+       cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
        spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
 #else
-       cc_set_imsk(cc_get_imsk() | ~(1 << real_irq));
+       cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
 #endif
 }
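/*
 * Worked example (editorial) for the unmask fix above: the interrupt mask
 * register masks a level when its bit is set, so unmasking must clear the
 * bit. With real_irq = 7, imsk & ~(1 << 7) clears only bit 7, while the old
 * imsk | ~(1 << 7) would have set every bit except bit 7 and masked all the
 * other levels.
 */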
 
@@ -299,26 +295,68 @@ static void __init sun4d_load_profile_irqs(void)
        }
 }
 
+unsigned int _sun4d_build_device_irq(unsigned int real_irq,
+                                     unsigned int pil,
+                                     unsigned int board)
+{
+       struct sun4d_handler_data *handler_data;
+       unsigned int irq;
+
+       irq = irq_alloc(real_irq, pil);
+       if (irq == 0) {
+               prom_printf("IRQ: allocate for %d %d %d failed\n",
+                       real_irq, pil, board);
+               goto err_out;
+       }
+
+       handler_data = irq_get_handler_data(irq);
+       if (unlikely(handler_data))
+               goto err_out;
+
+       handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
+       if (unlikely(!handler_data)) {
+               prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
+               prom_halt();
+       }
+       handler_data->cpuid    = board_to_cpu[board];
+       handler_data->real_irq = real_irq;
+       irq_set_chip_and_handler_name(irq, &sun4d_irq,
+                                     handle_level_irq, "level");
+       irq_set_handler_data(irq, handler_data);
+
+err_out:
+       return irq;
+}
+
+
+
 unsigned int sun4d_build_device_irq(struct platform_device *op,
                                     unsigned int real_irq)
 {
        struct device_node *dp = op->dev.of_node;
-       struct device_node *io_unit, *sbi = dp->parent;
+       struct device_node *board_parent, *bus = dp->parent;
+       char *bus_connection;
        const struct linux_prom_registers *regs;
-       struct sun4d_handler_data *handler_data;
        unsigned int pil;
        unsigned int irq;
        int board, slot;
        int sbusl;
 
-       irq = 0;
-       while (sbi) {
-               if (!strcmp(sbi->name, "sbi"))
+       irq = real_irq;
+       while (bus) {
+               if (!strcmp(bus->name, "sbi")) {
+                       bus_connection = "io-unit";
+                       break;
+               }
+
+               if (!strcmp(bus->name, "bootbus")) {
+                       bus_connection = "cpu-unit";
                        break;
+               }
 
-               sbi = sbi->parent;
+               bus = bus->parent;
        }
-       if (!sbi)
+       if (!bus)
                goto err_out;
 
        regs = of_get_property(dp, "reg", NULL);
@@ -328,17 +366,19 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
        slot = regs->which_io;
 
        /*
-        *  If SBI's parent is not io-unit or the io-unit lacks
-        * a "board#" property, something is very wrong.
+        * If the bus node's parent is not io-unit/cpu-unit or the io-unit/cpu-unit
+        * lacks a "board#" property, something is very wrong.
         */
-       if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
-               printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
+       if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
+               printk(KERN_ERR "%s: Error, parent is not %s.\n",
+                       bus->full_name, bus_connection);
                goto err_out;
        }
-       io_unit = sbi->parent;
-       board = of_getintprop_default(io_unit, "board#", -1);
+       board_parent = bus->parent;
+       board = of_getintprop_default(board_parent, "board#", -1);
        if (board == -1) {
-               printk("%s: Error, lacks board# property.\n", io_unit->full_name);
+               printk(KERN_ERR "%s: Error, lacks board# property.\n",
+                       board_parent->full_name);
                goto err_out;
        }
 
@@ -348,29 +388,17 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
        else
                pil = real_irq;
 
-       irq = irq_alloc(real_irq, pil);
-       if (irq == 0)
-               goto err_out;
-
-       handler_data = irq_get_handler_data(irq);
-       if (unlikely(handler_data))
-               goto err_out;
-
-       handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
-       if (unlikely(!handler_data)) {
-               prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
-               prom_halt();
-       }
-       handler_data->cpuid    = board_to_cpu[board];
-       handler_data->real_irq = real_irq;
-       irq_set_chip_and_handler_name(irq, &sun4d_irq,
-                                     handle_level_irq, "level");
-       irq_set_handler_data(irq, handler_data);
-
+       irq = _sun4d_build_device_irq(real_irq, pil, board);
 err_out:
-       return real_irq;
+       return irq;
 }
 
+unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
+{
+       return _sun4d_build_device_irq(real_irq, real_irq, board);
+}
+
+
 static void __init sun4d_fixup_trap_table(void)
 {
 #ifdef CONFIG_SMP
@@ -402,6 +430,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
        unsigned int irq;
        const u32 *reg;
        int err;
+       int board;
 
        dp = of_find_node_by_name(NULL, "cpu-unit");
        if (!dp) {
@@ -414,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
         * bootbus.
         */
        reg = of_get_property(dp, "reg", NULL);
-       of_node_put(dp);
        if (!reg) {
                prom_printf("sun4d_init_timers: No reg property\n");
                prom_halt();
        }
 
+       board = of_getintprop_default(dp, "board#", -1);
+       if (board == -1) {
+               prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
+               prom_halt();
+       }
+
+       of_node_put(dp);
+
        res.start = reg[1];
        res.end = reg[2] - 1;
        res.flags = reg[0] & 0xff;
@@ -434,7 +470,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
 
        master_l10_counter = &sun4d_timers->l10_cur_count;
 
-       irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ);
+       irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
        err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
        if (err) {
                prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
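
The unmask fix in this hunk replaces an OR of a complemented bit with an AND. Below is a small standalone sketch, not part of the diff, showing why mask | ~(1 << irq) sets essentially every other bit while mask & ~(1 << irq) clears only the intended one. The 16-bit mask width and the starting value are invented; the real code operates on the sun4d cc_get_imsk()/cc_set_imsk() registers.

#include <stdio.h>

/* Illustrative only: a 16-bit interrupt mask where a set bit means
 * "interrupt masked".  Unmasking one source must clear exactly one bit. */
static unsigned short unmask_wrong(unsigned short mask, unsigned int irq)
{
        return mask | ~(1u << irq);     /* ORing the complement sets nearly every bit */
}

static unsigned short unmask_right(unsigned short mask, unsigned int irq)
{
        return mask & ~(1u << irq);     /* ANDing the complement clears only bit irq */
}

int main(void)
{
        unsigned short imsk = 0x00f0;   /* hypothetical starting mask */

        printf("wrong: 0x%04x\n", unmask_wrong(imsk, 5));       /* prints 0xffff */
        printf("right: 0x%04x\n", unmask_right(imsk, 5));       /* prints 0x00d0 */
        return 0;
}
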
index 6db18c6927fbee68a20125e1eb31e486798eeac0..170cd8e8eb2a25d178d0b949380ebf56c640ed86 100644 (file)
@@ -109,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa
 
        default:
                return -ENOSYS;
-       };
+       }
 
        return -ENOSYS;
 }
index 96082d30def094021b75754981e05c511d098935..908b47a5ee2431cca7294ae1b81b961586838964 100644 (file)
@@ -460,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
                default:
                        err = -ENOSYS;
                        goto out;
-               };
+               }
        }
        if (call <= MSGCTL) {
                switch (call) {
@@ -481,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
                default:
                        err = -ENOSYS;
                        goto out;
-               };
+               }
        }
        if (call <= SHMCTL) {
                switch (call) {
@@ -507,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
                default:
                        err = -ENOSYS;
                        goto out;
-               };
+               }
        } else {
                err = -ENOSYS;
        }
index 2b8d54b2d850b18a3eb8b4756bbcda3eb3a8dbc3..1db6b18964d22cef79a22330fec92e22b3df7c98 100644 (file)
@@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
        case CLOCK_EVT_MODE_UNUSED:
                WARN_ON(1);
                break;
-       };
+       }
 }
 
 static struct clock_event_device sparc64_clockevent = {
index 1ed547bd850f8abd937529b0752c60ddcd8027ac..0cbdaa41cd1eb8fa19f4597b834da3c9a489b4ee 100644 (file)
@@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type)
                return "warning resumable";
        default:
                return "unknown";
-       };
+       }
 }
 
 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
index c752c4c479bd8457da289b3da9797980d8dec7da..b2b019ea8caab36b346cd6a0ed766bfe075eb264 100644 (file)
@@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                default:
                        BUG();
                        break;
-               };
+               }
        }
        return __do_int_store(dst_addr, size, src_val, asi);
 }
@@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                case ASI_SNFL:
                        asi &= ~0x08;
                        break;
-               };
+               }
                switch (dir) {
                case load:
                        reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
@@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                                default:
                                        BUG();
                                        break;
-                               };
+                               }
                                *reg_addr = val_in;
                        }
                        break;
index 531d54fc9829271efb36c572a9b32129fddded59..489fc15f3194b4e74f21541cf72523cf7ee3d85e 100644 (file)
@@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index)
 
        default:
                BUG();
-       };
+       }
 }
 
 static unsigned long index_to_divisor(unsigned int index)
@@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index)
 
        default:
                BUG();
-       };
+       }
 }
 
 static unsigned long estar_to_divisor(unsigned long estar)
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar)
                break;
        default:
                BUG();
-       };
+       }
 
        return ret;
 }
index 9a8ceb7008330a9fb695c1ddb2d2377cb15bb68f..eb1624b931d903fa4ea8955a270c111d2ba7b3d4 100644 (file)
@@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
                break;
        default:
                BUG();
-       };
+       }
 
        return ret;
 }
@@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
 
        default:
                BUG();
-       };
+       }
 
        reg = read_safari_cfg();
 
index aa6ac70d4fd501099aa8082fd3da0f877e435291..29348ea139c33a7b1221bd0ce484a99ca3255f9d 100644 (file)
@@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
 
        default:
                return handshake_failure(vio);
-       };
+       }
 }
 
 static int process_attr(struct vio_driver_state *vio, void *pkt)
index 9dfd2ebcb157143bb69fe988dd1ced391ab8e874..36357717d691019ff4772c7a4aa2993c68f13c35 100644 (file)
@@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
                right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
                break;
-       };
+       }
 
        if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
                rd_val = right & left;
@@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
                regs->tstate = tstate | (ccr << 32UL);
        }
-       };
+       }
 }
 
 static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
 
        case ARRAY32_OPF:
                rd_val <<= 2;
-       };
+       }
 
        store_reg(regs, rd_val, RD(insn));
 }
@@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                *fpd_regaddr(f, RD(insn)) = rd_val;
                break;
        }
-       };
+       }
 }
 
 static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                *fpd_regaddr(f, RD(insn)) = rd_val;
                break;
        }
-       };
+       }
 }
 
 static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                                rd_val |= 1 << i;
                }
                break;
-       };
+       }
 
        maybe_flush_windows(0, 0, RD(insn), 0);
        store_reg(regs, rd_val, RD(insn));
@@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
        case BSHUFFLE_OPF:
                bshuffle(regs, insn);
                break;
-       };
+       }
 
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
index b10ac4d62378a3705e52a010feb5e326f41b2d2f..7543ddbdadb271b18e79ccbaf126ed3ba3650991 100644 (file)
@@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 
        default:
                break;
-       };
+       }
 
        memset(&regs, 0, sizeof (regs));
        regs.pc = pc;
index ca217327e8d27bb41c7fe1dac29be90e65919d41..7b00de61c5f1f73cf2e8c18c3cf07dfc1e190c50 100644 (file)
@@ -340,7 +340,7 @@ void __init paging_init(void)
                prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
                prom_printf("paging_init: Halting...\n");
                prom_halt();
-       };
+       }
 
        /* Initialize the protection map with non-constant, MMU dependent values. */
        protection_map[0] = PAGE_NONE;
index e10cd03fab801648ef2e5da6361a0a23867560aa..3fd8e18bed80a4ea06b960572b664bfed1d23a23 100644 (file)
@@ -1625,7 +1625,7 @@ static void __init sun4v_ktsb_init(void)
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
                break;
-       };
+       }
 
        ktsb_descr[0].assoc = 1;
        ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
@@ -2266,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz)
                        return _PAGE_SZ512K_4V;
                case 4 * 1024 * 1024:
                        return _PAGE_SZ4MB_4V;
-               };
+               }
        } else {
                switch (sz) {
                case 8 * 1024:
@@ -2278,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz)
                        return _PAGE_SZ512K_4U;
                case 4 * 1024 * 1024:
                        return _PAGE_SZ4MB_4U;
-               };
+               }
        }
 }
 
index fe09fd8be6956e5f00fe0fddc5c9bbf6a11882c5..cbef74e793b8df9c9f3b7dc474f7a64feb082b3d 100644 (file)
@@ -1665,7 +1665,7 @@ static void __init init_swift(void)
        default:
                srmmu_modtype = Swift_ok;
                break;
-       };
+       }
 
        BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
@@ -2069,7 +2069,7 @@ static void __init get_srmmu_type(void)
                        /* Some other Cypress revision, assume a 605. */
                        init_cypress_605(mod_rev);
                        break;
-               };
+               }
                return;
        }
        
index a2350b5e68aa66c237999dca7790a7e7e0282c48..1cf4f198709a2d798679b39f3e64629d4f24ac9a 100644 (file)
@@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void)
                prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
                            sun4c_vacinfo.linesize);
                prom_halt();
-       };
+       }
 
        sun4c_flush_all();
        sun4c_enable_vac();
@@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void)
                        prom_printf("Unhandled number of segmaps: %d\n",
                                    num_segmaps);
                        prom_halt();
-       };
+       }
        switch (num_contexts) {
                case 8:
                        /* Default, nothing to do. */
@@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void)
                        prom_printf("Unhandled number of contexts: %d\n",
                                    num_contexts);
                        prom_halt();
-       };
+       }
 
        if (sun4c_vacinfo.do_hwflushes != 0) {
                PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
@@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void)
                        prom_printf("Impossible VAC linesize %d, halting...\n",
                                    sun4c_vacinfo.linesize);
                        prom_halt();
-               };
+               }
        }
 }
 
index 94846151349991387435c0a61a925b93b09debb2..a5f51b22fcbe9eb4654a5d33d38054c886d75761 100644 (file)
@@ -180,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
                printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
                       current->comm, current->pid, tsb_bytes);
                do_exit(SIGSEGV);
-       };
+       }
        tte |= pte_sz_bits(page_sz);
 
        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
@@ -215,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 #endif
                default:
                        BUG();
-               };
+               }
                hp->assoc = 1;
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
@@ -230,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 #endif
                default:
                        BUG();
-               };
+               }
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
index b05e3db5fa63c766718c79578d200d6ad99967b6..a00f47b16c10a7ca15190e41f47c28489916ecc3 100644 (file)
@@ -38,7 +38,7 @@ static int prom_nbputchar(const char *buf)
                break;
        default:
                break;
-       };
+       }
        restore_current();
        spin_unlock_irqrestore(&prom_lock, flags);
        return i; /* Ugh, we could spin forever on unsupported proms ;( */
index 0a601b300639ff6ce7a0909c7b3a2ff544dbd8c3..26c64cea3c9c082e701f672e299a50ff2f70ad09 100644 (file)
@@ -53,7 +53,7 @@ void __init prom_init(struct linux_romvec *rp)
                            romvec->pv_romvers);
                prom_halt();
                break;
-       };
+       }
 
        prom_rev = romvec->pv_plugin_revision;
        prom_prev = romvec->pv_printrev;
index 97c44c9ddbc85bd87a400a9560170f1313656635..0da8256cf76f286a6e2aa2d4c60e219e18a1e911 100644 (file)
@@ -35,7 +35,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
        case PROM_V3:
                ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
                break;
-       };
+       }
        restore_current();
        spin_unlock_irqrestore(&prom_lock, flags);
 
index d3a303246c9f0967ff1ac86e9359eded8fae15da..e57dcce9bfda4da6ea6be362a542bf891289f0c0 100644 (file)
@@ -231,10 +231,6 @@ config PUV3_PWM
        help
          Enable support for NB0916 PWM controllers
 
-config PUV3_RTC
-       tristate "PKUnity v3 RTC Support"
-       depends on !ARCH_FPGA
-
 if PUV3_NB0916
 
 menu "PKUnity NetBook-0916 Features"
index 76a8beec7d03e454d49e9da70a6c6dbc734566ef..6af4bc415f2b34ec19fc024c1242a470a4feef73 100644 (file)
@@ -40,42 +40,10 @@ core-y                      += arch/unicore32/mm/
 
 libs-y                 += arch/unicore32/lib/
 
-ASM_GENERATED_DIR      := $(srctree)/arch/unicore32/include/generated
-LINUXINCLUDE           += -I$(ASM_GENERATED_DIR)
-
-ASM_GENERIC_HEADERS    := atomic.h auxvec.h
-ASM_GENERIC_HEADERS    += bitsperlong.h bug.h bugs.h
-ASM_GENERIC_HEADERS    += cputime.h current.h
-ASM_GENERIC_HEADERS    += device.h div64.h
-ASM_GENERIC_HEADERS    += emergency-restart.h errno.h
-ASM_GENERIC_HEADERS    += fb.h fcntl.h ftrace.h futex.h
-ASM_GENERIC_HEADERS    += hardirq.h hw_irq.h
-ASM_GENERIC_HEADERS    += ioctl.h ioctls.h ipcbuf.h irq_regs.h
-ASM_GENERIC_HEADERS    += kdebug.h kmap_types.h
-ASM_GENERIC_HEADERS    += local.h
-ASM_GENERIC_HEADERS    += mman.h module.h msgbuf.h
-ASM_GENERIC_HEADERS    += param.h parport.h percpu.h poll.h posix_types.h
-ASM_GENERIC_HEADERS    += resource.h
-ASM_GENERIC_HEADERS    += scatterlist.h sections.h segment.h sembuf.h serial.h
-ASM_GENERIC_HEADERS    += setup.h shmbuf.h shmparam.h
-ASM_GENERIC_HEADERS    += siginfo.h signal.h sizes.h
-ASM_GENERIC_HEADERS    += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
-ASM_GENERIC_HEADERS    += termbits.h termios.h topology.h types.h
-ASM_GENERIC_HEADERS    += ucontext.h unaligned.h user.h
-ASM_GENERIC_HEADERS    += vga.h
-ASM_GENERIC_HEADERS    += xor.h
-
-archprepare:
-ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
-       $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
-       $(Q)$(foreach a, $(ASM_GENERIC_HEADERS),        \
-               echo '#include <asm-generic/$a>'        \
-                       > $(ASM_GENERATED_DIR)/asm/$a; )
-endif
-
 boot                   := arch/unicore32/boot
 
-# Default target when executing plain make
+# Default defconfig and target when executing plain make
+KBUILD_DEFCONFIG       := $(ARCH)_defconfig
 KBUILD_IMAGE           := zImage
 
 all:   $(KBUILD_IMAGE)
@@ -83,8 +51,6 @@ all:  $(KBUILD_IMAGE)
 zImage Image uImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-MRPROPER_DIRS          += $(ASM_GENERATED_DIR)
-
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
index 95373428cb3d846029312a737669c5a522895c8e..b0954a2d23cfaf5b4db0bbe1b6e95493d75ebc03 100644 (file)
@@ -59,7 +59,7 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
 # We now have a PIC decompressor implementation.  Decompressors running
 # from RAM should not define ZTEXTADDR.  Decompressors running directly
 # from ROM or Flash must define ZTEXTADDR (preferably via the config)
-ZTEXTADDR      := 0
+ZTEXTADDR      := 0x03000000
 ZBSSADDR       := ALIGN(4)
 
 SEDFLAGS_lds   = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
similarity index 97%
rename from arch/unicore32/configs/debug_defconfig
rename to arch/unicore32/configs/unicore32_defconfig
index b5fbde9f1cb2cf8623e911178916e7595b6acbde..c9dd3198b6f701d3e07b50c81528fb240da7b9ca 100644 (file)
@@ -1,6 +1,6 @@
 ### General setup
 CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-debug"
+CONFIG_LOCALVERSION="-unicore32"
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -64,7 +64,6 @@ CONFIG_I2C_BATTERY_BQ27200=n
 CONFIG_I2C_EEPROM_AT24=n
 CONFIG_LCD_BACKLIGHT=n
 
-CONFIG_PUV3_RTC=y
 CONFIG_PUV3_UMAL=y
 CONFIG_PUV3_MUSB=n
 CONFIG_PUV3_AC97=n
@@ -167,8 +166,9 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 
 #      Real Time Clock
-CONFIG_RTC_LIB=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PUV3=y
 
 ### File systems
 CONFIG_EXT2_FS=m
index b200fdaca44de8061175e02d95c47c58489abc0b..ca113d6999c5279df35ad1d47a275ace8fd18463 100644 (file)
@@ -1,2 +1,61 @@
 include include/asm-generic/Kbuild.asm
 
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += syscalls.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
index ec23a2fb2f508646d38c76ce7bbd89ed20bc8168..aeb0f181568e39f5537f70d7078163c09f29c2cb 100644 (file)
@@ -16,7 +16,6 @@ obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
 obj-$(CONFIG_ARCH_PUV3)                += clock.o irq.o time.o
 
 obj-$(CONFIG_PUV3_GPIO)                += gpio.o
-obj-$(CONFIG_PUV3_RTC)         += rtc.o
 obj-$(CONFIG_PUV3_PWM)         += pwm.o
 obj-$(CONFIG_PUV3_PM)          += pm.o sleep.o
 obj-$(CONFIG_HIBERNATION)      += hibernate.o hibernate_asm.o
index 9bf7f7af52c529f10566dbf8a9a816ecf0b65567..77e407e49a632c84cfb4957580c13dfbbebd1df4 100644 (file)
@@ -30,7 +30,7 @@ SECTIONS
        HEAD_TEXT_SECTION
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
-       PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+       PERCPU_SECTION(L1_CACHE_BYTES)
        __init_end = .;
 
        _stext = .;
index 19ae14ba69780c38defd3fbcfe7fdcb05859f9c0..0cd3800f33b9dcf68e39a1288ebf7ccd442f3a97 100644 (file)
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
index cd8cbeb5fa34f4ed685d9247995846e4c2d0fd32..7c3a95e54ec57cae2f9dd96a831bc5b7f53a6f69 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
+#include <asm/dma.h>
 #include <asm/amd_iommu_proto.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
        pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
        if (pdev)
                dev_data->alias = &pdev->dev;
+       else {
+               kfree(dev_data);
+               return -ENOTSUPP;
+       }
 
        atomic_set(&dev_data->bind, 0);
 
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
        return 0;
 }
 
+static void iommu_ignore_device(struct device *dev)
+{
+       u16 devid, alias;
+
+       devid = get_device_id(dev);
+       alias = amd_iommu_alias_table[devid];
+
+       memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+       memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+       amd_iommu_rlookup_table[devid] = NULL;
+       amd_iommu_rlookup_table[alias] = NULL;
+}
+
 static void iommu_uninit_device(struct device *dev)
 {
        kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
                        continue;
 
                ret = iommu_init_device(&pdev->dev);
-               if (ret)
+               if (ret == -ENOTSUPP)
+                       iommu_ignore_device(&pdev->dev);
+               else if (ret)
                        goto out_free;
        }
 
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
        .dma_supported = amd_iommu_dma_supported,
 };
 
+static unsigned device_dma_ops_init(void)
+{
+       struct pci_dev *pdev = NULL;
+       unsigned unhandled = 0;
+
+       for_each_pci_dev(pdev) {
+               if (!check_device(&pdev->dev)) {
+                       unhandled += 1;
+                       continue;
+               }
+
+               pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+       }
+
+       return unhandled;
+}
+
 /*
  * The function which clues the AMD IOMMU driver into dma_ops.
  */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
        struct amd_iommu *iommu;
-       int ret;
+       int ret, unhandled;
 
        /*
         * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
        swiotlb = 0;
 
        /* Make the driver finally visible to the drivers */
-       dma_ops = &amd_iommu_dma_ops;
+       unhandled = device_dma_ops_init();
+       if (unhandled && max_pfn > MAX_DMA32_PFN) {
+               /* There are unhandled devices - initialize swiotlb for them */
+               swiotlb = 1;
+       }
 
        amd_iommu_stats_init();
 
index 9179c21120a88ea5e764297fc5c91c6a488fb51c..bfc8453bd98dfae4bd62620e1ebd20a9151b9ea3 100644 (file)
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 {
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
-       u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
-       u32 ext_flags = 0;
+       u16 devid = 0, devid_start = 0, devid_to = 0;
+       u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;
 
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 /* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
-       u16 i;
+       u32 i;
 
        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  */
 static void init_device_table(void)
 {
-       u16 devid;
+       u32 devid;
 
        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
index b511a011b7d08f31405371445c00271abfdeed54..adc66c3a1fef2417be8741d334d5f8460c23ff10 100644 (file)
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
 int uv_set_vga_state(struct pci_dev *pdev, bool decode,
-                     unsigned int command_bits, bool change_bridge)
+                     unsigned int command_bits, u32 flags)
 {
        int domain, bus, rc;
 
-       PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n",
-                       pdev->devfn, decode, command_bits, change_bridge);
+       PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
+                       pdev->devfn, decode, command_bits, flags);
 
-       if (!change_bridge)
+       if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                return 0;
 
        if ((command_bits & PCI_COMMAND_IO) == 0)
index 690bc8461835bc01fb6f6cf436fcd94a960db9f2..9aeb78a23de4658fb279787a75c391a6c603c1e9 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
        return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+                                           unsigned long end)
+{
+       initrd_start = (unsigned long)__va(start);
+       initrd_end = (unsigned long)__va(end);
+       initrd_below_start_ok = 1;
+}
+#endif
+
 void __init add_dtb(u64 data)
 {
        initial_dtb = data + offsetof(struct setup_data, data);
index 2e4928d45a2dd476d89904778df6ac2dbf5e681c..e1ba8cb24e4edb6629a9107312c071739c32de75 100644 (file)
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
  * Powermanagement idle function, if any..
  */
 void (*pm_idle)(void);
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(pm_idle);
 #endif
 
@@ -399,7 +399,7 @@ void default_idle(void)
                cpu_relax();
        }
 }
-#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
+#ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(default_idle);
 #endif
 
index 8d128783af47374e56412d01d0488aa12af1647b..a3d0dc59067be542d7423d2a8abbc99801c0d3f6 100644 (file)
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
        set_user_gs(regs, 0);
        regs->fs                = 0;
-       set_fs(USER_DS);
        regs->ds                = __USER_DS;
        regs->es                = __USER_DS;
        regs->ss                = __USER_DS;
index 6c9dd922ac0d8b34ff9cac883ef86bf8843cb912..ca6f7ab8df332992166d51c802216d350c8729cc 100644 (file)
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
-       set_fs(USER_DS);
        /*
         * Free the old FP and other extended state
         */
index 33a0c11797de758004b399225eec21db5a1e932e..9fd3137230d46af2053cdebb88c3a6ef4867984f 100644 (file)
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        x86_platform.nmi_init();
 
+       /*
+        * Wait until the cpu which brought this one up marked it
+        * online before enabling interrupts. If we don't do that then
+        * we can end up waking up the softirq thread before this cpu
+        * reached the active state, which makes the scheduler unhappy
+        * and schedule the softirq thread on the wrong cpu. This is
+        * only observable with forced threaded interrupts, but in
+        * theory it could also happen w/o them. It's just way harder
+        * to achieve.
+        */
+       while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+               cpu_relax();
+
        /* enable local interrupts */
        local_irq_enable();
 
index d6e2477feb180e3787b57758f3e0151eb4a3f0b6..6df88c7885c0e24bbbdfcad54682d52cbfb085fb 100644 (file)
 #define DstDI       (5<<1)     /* Destination is in ES:(E)DI */
 #define DstMem64    (6<<1)     /* 64bit memory operand */
 #define DstImmUByte (7<<1)     /* 8-bit unsigned immediate operand */
-#define DstMask     (7<<1)
+#define DstDX       (8<<1)     /* Destination is in DX register */
+#define DstMask     (0xf<<1)
 /* Source operand type. */
-#define SrcNone     (0<<4)     /* No source operand. */
-#define SrcReg      (1<<4)     /* Register operand. */
-#define SrcMem      (2<<4)     /* Memory operand. */
-#define SrcMem16    (3<<4)     /* Memory operand (16-bit). */
-#define SrcMem32    (4<<4)     /* Memory operand (32-bit). */
-#define SrcImm      (5<<4)     /* Immediate operand. */
-#define SrcImmByte  (6<<4)     /* 8-bit sign-extended immediate operand. */
-#define SrcOne      (7<<4)     /* Implied '1' */
-#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
-#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
-#define SrcSI       (0xa<<4)   /* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4)   /* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4)   /* Source is far address in memory */
-#define SrcAcc      (0xd<<4)   /* Source Accumulator */
-#define SrcImmU16   (0xe<<4)    /* Immediate operand, unsigned, 16 bits */
-#define SrcMask     (0xf<<4)
+#define SrcNone     (0<<5)     /* No source operand. */
+#define SrcReg      (1<<5)     /* Register operand. */
+#define SrcMem      (2<<5)     /* Memory operand. */
+#define SrcMem16    (3<<5)     /* Memory operand (16-bit). */
+#define SrcMem32    (4<<5)     /* Memory operand (32-bit). */
+#define SrcImm      (5<<5)     /* Immediate operand. */
+#define SrcImmByte  (6<<5)     /* 8-bit sign-extended immediate operand. */
+#define SrcOne      (7<<5)     /* Implied '1' */
+#define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
+#define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
+#define SrcSI       (0xa<<5)   /* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5)   /* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5)   /* Source is far address in memory */
+#define SrcAcc      (0xd<<5)   /* Source Accumulator */
+#define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
+#define SrcDX       (0xf<<5)   /* Source is in DX register */
+#define SrcMask     (0xf<<5)
 /* Generic ModRM decode. */
-#define ModRM       (1<<8)
+#define ModRM       (1<<9)
 /* Destination is only written; never read. */
-#define Mov         (1<<9)
-#define BitOp       (1<<10)
-#define MemAbs      (1<<11)      /* Memory operand is absolute displacement */
-#define String      (1<<12)     /* String instruction (rep capable) */
-#define Stack       (1<<13)     /* Stack instruction (push/pop) */
-#define GroupMask   (7<<14)     /* Opcode uses one of the group mechanisms */
-#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual   (2<<14)     /* Alternate decoding of mod == 3 */
-#define Prefix      (3<<14)     /* Instruction varies with 66/f2/f3 prefix */
-#define RMExt       (4<<14)     /* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse         (1<<17)     /* SSE Vector instruction */
+#define Mov         (1<<10)
+#define BitOp       (1<<11)
+#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
+#define String      (1<<13)     /* String instruction (rep capable) */
+#define Stack       (1<<14)     /* Stack instruction (push/pop) */
+#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
+#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
+#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse         (1<<18)     /* SSE Vector instruction */
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
        I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
        I(SrcImmByte | Mov | Stack, em_push),
        I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
-       D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
-       D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+       D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+       D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
        /* 0x70 - 0x7F */
        X16(D(SrcImmByte)),
        /* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
        /* 0xE8 - 0xEF */
        D(SrcImm | Stack), D(SrcImm | ImplicitOps),
        D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
-       D2bvIP(SrcNone | DstAcc,     in,  check_perm_in),
-       D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+       D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
+       D2bvIP(SrcAcc | DstDX, out, check_perm_out),
        /* 0xF0 - 0xF7 */
        N, DI(ImplicitOps, icebp), N, N,
        DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
                memop.bytes = c->op_bytes + 2;
                goto srcmem_common;
                break;
+       case SrcDX:
+               c->src.type = OP_REG;
+               c->src.bytes = 2;
+               c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+               fetch_register_operand(&c->src);
+               break;
        }
 
        if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
                c->dst.addr.mem.seg = VCPU_SREG_ES;
                c->dst.val = 0;
                break;
+       case DstDX:
+               c->dst.type = OP_REG;
+               c->dst.bytes = 2;
+               c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+               fetch_register_operand(&c->dst);
+               break;
        case ImplicitOps:
                /* Special instructions do their own operand decoding. */
        default:
@@ -4027,7 +4041,6 @@ special_insn:
                break;
        case 0xec: /* in al,dx */
        case 0xed: /* in (e/r)ax,dx */
-               c->src.val = c->regs[VCPU_REGS_RDX];
        do_io_in:
                if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
                                     &c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
                break;
        case 0xee: /* out dx,al */
        case 0xef: /* out dx,(e/r)ax */
-               c->dst.val = c->regs[VCPU_REGS_RDX];
        do_io_out:
                ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
                                      &c->src.val, 1);
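
This hunk widens the destination field of the emulator's packed flag word from three bits to four and shifts every higher field up by one so the new DstDX/SrcDX encodings fit. A standalone sketch of that packing scheme follows; the shift amounts mirror the new layout, but the macro names and the example operand pair are illustrative rather than the kernel's definitions.

#include <stdio.h>

#define DST_SHIFT 1
#define DST_MASK  (0xfu << DST_SHIFT)   /* grew from 3 bits to 4 */
#define SRC_SHIFT 5                     /* everything above moved up by one bit */
#define SRC_MASK  (0xfu << SRC_SHIFT)

#define DST_DX    0x8u                  /* needs the fourth destination bit */
#define SRC_SI    0xau

static unsigned int dst_type(unsigned int flags) { return (flags & DST_MASK) >> DST_SHIFT; }
static unsigned int src_type(unsigned int flags) { return (flags & SRC_MASK) >> SRC_SHIFT; }

int main(void)
{
        /* roughly the "outs" shape: destination DX, source SI */
        unsigned int flags = (DST_DX << DST_SHIFT) | (SRC_SI << SRC_SHIFT);

        printf("dst=%u src=%u\n", dst_type(flags), src_type(flags));    /* dst=8 src=10 */
        return 0;
}
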
index aa1169392b83eb44350ec5da13cd23bfc3477986..992da5ec5a64d69ddc3381d9e8508e4d6061d4ef 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
        struct memblock_region *r;
        u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                if (addr >= ei_last)
                        continue;
                *sizep = ei_last - addr;
-               while (check_with_memblock_reserved_size(&addr, sizep, align))
+               while (memblock_x86_check_reserved_size(&addr, sizep, align))
                        ;
 
                if (*sizep)
index 0d3a4fa34560018c55a2619251bcb0db6546c4d1..474356b98ede32e647d4343c32a7e7893fe4beff 100644 (file)
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                efi_memory_desc_t *md = p;
-               unsigned long long start = md->phys_addr;
-               unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+               u64 start = md->phys_addr;
+               u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
-
-               memblock_x86_reserve_range(start, start + size, "EFI Boot");
+               /* Only reserve where possible:
+                * - Not within any already allocated areas
+                * - Not over any memory area (really needed, if above?)
+                * - Not within any part of the kernel
+                * - Not the bios reserved area
+               */
+               if ((start+size >= virt_to_phys(_text)
+                               && start <= virt_to_phys(_end)) ||
+                       !e820_all_mapped(start, start+size, E820_RAM) ||
+                       memblock_x86_check_reserved_size(&start, &size,
+                                                       1<<EFI_PAGE_SHIFT)) {
+                       /* Could not reserve, skip it */
+                       md->num_pages = 0;
+                       memblock_dbg(PFX "Could not reserve boot range "
+                                       "[0x%010llx-0x%010llx]\n",
+                                               start, start+size-1);
+               } else
+                       memblock_x86_reserve_range(start, start+size,
+                                                       "EFI Boot");
        }
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
 
+               /* Could not reserve boot area */
+               if (!size)
+                       continue;
+
                free_bootmem_late(start, size);
        }
 }
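
The new reservation logic above boils down to interval tests: skip any EFI boot-services region that touches the kernel image, is not fully RAM, or collides with an existing reservation, and mark it unusable instead. A minimal sketch of that decision with half-open ranges and made-up addresses; the real code compares against virt_to_phys(_text)/_end, the e820 map and memblock.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };  /* half-open [start, end) */

static bool overlaps(struct range a, struct range b)
{
        return a.start < b.end && b.start < a.end;
}

int main(void)
{
        struct range kernel   = { 0x01000000, 0x02000000 };     /* hypothetical _text.._end */
        struct range reserved = { 0x7f000000, 0x7f100000 };     /* hypothetical prior reservation */
        struct range region   = { 0x01f00000, 0x03000000 };     /* boot-services candidate */

        if (overlaps(region, kernel) || overlaps(region, reserved))
                puts("skip: cannot reserve, mark region unusable (num_pages = 0)");
        else
                puts("reserve region now, free it after boot services are done");
        return 0;
}
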
index 8bff7e7c290b7d30ef805061115690e7267dc4a9..1b2b73ff0a6e52176ba4a08d324ade8785382bd4 100644 (file)
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
        unsigned argidx = roundup(b->argidx, sizeof(u64));
 
        BUG_ON(preemptible());
-       BUG_ON(b->argidx > MC_ARGS);
+       BUG_ON(b->argidx >= MC_ARGS);
 
        if (b->mcidx == MC_BATCH ||
-           (argidx + args) > MC_ARGS) {
+           (argidx + args) >= MC_ARGS) {
                mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
                xen_mc_flush();
                argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
        ret.args = &b->args[argidx];
        b->argidx = argidx + args;
 
-       BUG_ON(b->argidx > MC_ARGS);
+       BUG_ON(b->argidx >= MC_ARGS);
        return ret;
 }
 
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
        struct multicall_space ret = { NULL, NULL };
 
        BUG_ON(preemptible());
-       BUG_ON(b->argidx > MC_ARGS);
+       BUG_ON(b->argidx >= MC_ARGS);
 
        if (b->mcidx == 0)
                return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
        if (b->entries[b->mcidx - 1].op != op)
                return ret;
 
-       if ((b->argidx + size) > MC_ARGS)
+       if ((b->argidx + size) >= MC_ARGS)
                return ret;
 
        ret.mc = &b->entries[b->mcidx - 1];
        ret.args = &b->args[b->argidx];
        b->argidx += size;
 
-       BUG_ON(b->argidx > MC_ARGS);
+       BUG_ON(b->argidx >= MC_ARGS);
        return ret;
 }
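
The BUG_ON changes here tighten an index check from > to >=. The sketch below models the underlying rule with an arbitrary 8-slot arena (the real MC_ARGS differs): because argidx indexes args[MC_ARGS], an index equal to MC_ARGS already points one past the end, so both the sanity check and the "does it fit" test have to treat it as out of bounds.

#include <stdio.h>

#define MC_ARGS 8

struct arena {
        unsigned long args[MC_ARGS];
        unsigned int argidx;            /* next free slot */
};

/* Returns a pointer to n reserved slots, or NULL if they would not fit.
 * The first clause is the >= part: once argidx reaches MC_ARGS, even a
 * zero-size request must not hand out &args[MC_ARGS]. */
static unsigned long *reserve(struct arena *a, unsigned int n)
{
        if (a->argidx >= MC_ARGS || a->argidx + n > MC_ARGS)
                return NULL;

        unsigned long *p = &a->args[a->argidx];
        a->argidx += n;
        return p;
}

int main(void)
{
        struct arena a = { .argidx = 0 };

        for (unsigned int i = 0; i < 4; i++)
                printf("reserve(3) -> %s\n", reserve(&a, 3) ? "ok" : "full");
        return 0;
}
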
 
index c898049dafd54d8ea9deed98885b02db10ce43e7..342eae9b0d3cf982762987dfc5efb9a59f9b7275 100644 (file)
@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;
 
-               cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+               cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                                                cic_list);
                cic->dtor(ioc);
        }
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;
 
-               cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+               cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                                                cic_list);
                cic->exit(ioc);
        }
index 7c52d6888924c53215579b1b663083be376dfa48..3c7b537bf9081f9f43b90c7cdcd9a95073f1c02b 100644 (file)
@@ -185,7 +185,7 @@ struct cfq_group {
        int nr_cfqq;
 
        /*
-        * Per group busy queus average. Useful for workload slice calc. We
+        * Per group busy queues average. Useful for workload slice calc. We
         * create the array for each prio class but at run time it is used
         * only for RT and BE class and slot for IDLE class remains unused.
         * This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-                       blkg_path(&(cfqq)->cfqg->blkg), ##args);
+                       blkg_path(&(cfqq)->cfqg->blkg), ##args)
 
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)                         \
        blk_add_trace_msg((cfqd)->queue, "%s " fmt,                     \
-                               blkg_path(&(cfqg)->blkg), ##args);      \
+                               blkg_path(&(cfqg)->blkg), ##args)       \
 
 #else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0);
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0)
 #endif
 #define cfq_log(cfqd, fmt, args...)    \
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -3786,9 +3786,6 @@ new_queue:
        return 0;
 
 queue_fail:
-       if (cic)
-               put_io_context(cic->ioc);
-
        cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        cfq_log(cfqd, "set_request fail");
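
The macro changes in this hunk drop trailing semicolons from the do { } while (0) wrappers. A toy example of why that matters follows; the LOG_* macros are invented, not cfq's.

#include <stdio.h>

/* A macro ending in "while (0);" injects its own semicolon, so
 * "if (...) LOG(); else ..." becomes two statements before the else
 * and fails to compile. */
#define LOG_BROKEN(msg)  do { puts(msg); } while (0);
#define LOG_FIXED(msg)   do { puts(msg); } while (0)

int main(void)
{
        int busy = 1;

        if (busy)
                LOG_FIXED("queue busy");        /* one statement; the else binds correctly */
        else
                puts("idle");

        /* Swapping in LOG_BROKEN above gives:
         *   error: 'else' without a previous 'if'
         */
        return 0;
}
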
index dfb6e9d3d759921419d8713fc7c48764ad943207..7f099d6e4e0bca601f26f83597e01ec185f03d54 100644 (file)
@@ -2802,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
        }
 
        /*
-        * Some controllers can't be frozen very well and may set
-        * spuruious error conditions during reset.  Clear accumulated
-        * error information.  As reset is the final recovery action,
-        * nothing is lost by doing this.
+        * Some controllers can't be frozen very well and may set spurious
+        * error conditions during reset.  Clear accumulated error
+        * information and re-thaw the port if frozen.  As reset is the
+        * final recovery action and we cross check link onlineness against
+        * device classification later, no hotplug event is lost by this.
         */
        spin_lock_irqsave(link->ap->lock, flags);
        memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2814,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        spin_unlock_irqrestore(link->ap->lock, flags);
 
+       if (ap->pflags & ATA_PFLAG_FROZEN)
+               ata_eh_thaw_port(ap);
+
        /*
         * Make sure onlineness and classification result correspond.
         * Hotplug could have happened during reset and some
index c0dd09df7be891a113e94bd2a53cc7a6de8bd1e5..eaa8a854af03f6d4adc902fece195e7aabb69fd6 100644 (file)
@@ -291,7 +291,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 {
        struct pm_clk_notifier_block *clknb;
        struct device *dev = data;
-       char *con_id;
+       char **con_id;
        int error;
 
        dev_dbg(dev, "%s() %ld\n", __func__, action);
@@ -309,8 +309,8 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 
                dev->pwr_domain = clknb->pwr_domain;
                if (clknb->con_ids[0]) {
-                       for (con_id = clknb->con_ids[0]; *con_id; con_id++)
-                               pm_runtime_clk_add(dev, con_id);
+                       for (con_id = clknb->con_ids; *con_id; con_id++)
+                               pm_runtime_clk_add(dev, *con_id);
                } else {
                        pm_runtime_clk_add(dev, NULL);
                }
@@ -380,7 +380,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 {
        struct pm_clk_notifier_block *clknb;
        struct device *dev = data;
-       char *con_id;
+       char **con_id;
 
        dev_dbg(dev, "%s() %ld\n", __func__, action);
 
@@ -389,16 +389,16 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                if (clknb->con_ids[0]) {
-                       for (con_id = clknb->con_ids[0]; *con_id; con_id++)
-                               enable_clock(dev, con_id);
+                       for (con_id = clknb->con_ids; *con_id; con_id++)
+                               enable_clock(dev, *con_id);
                } else {
                        enable_clock(dev, NULL);
                }
                break;
        case BUS_NOTIFY_DEL_DEVICE:
                if (clknb->con_ids[0]) {
-                       for (con_id = clknb->con_ids[0]; *con_id; con_id++)
-                               disable_clock(dev, con_id);
+                       for (con_id = clknb->con_ids; *con_id; con_id++)
+                               disable_clock(dev, *con_id);
                } else {
                        disable_clock(dev, NULL);
                }
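
The clock notifier fix changes the cursor type from char * to char **: con_ids is a NULL-terminated array of strings, not a single string, so the old loop walked the bytes of the first name. A small sketch of the difference, with invented clock connection ids.

#include <stdio.h>

static const char *con_ids[] = { "fck", "ick", "dbck", NULL };

int main(void)
{
        /* wrong shape: iterates the characters of "fck": 'f', 'c', 'k' */
        for (const char *c = con_ids[0]; *c; c++)
                printf("char: %c\n", *c);

        /* right shape: iterates the array of connection ids */
        for (const char **id = con_ids; *id; id++)
                printf("con_id: %s\n", *id);

        return 0;
}
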
index e6fc716aca4521cc7537e09a393b6213b0a3ea88..f533f3375e24751feaf4e065b920291c7abe149d 100644 (file)
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
                        if (lo->xmit_timeout)
                                del_timer_sync(&ti);
                } else
-                       result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+                       result = kernel_recvmsg(sock, &msg, &iov, 1, size,
+                                               msg.msg_flags);
 
                if (signal_pending(current)) {
                        siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
                return -ENOMEM;
 
        part_shift = 0;
-       if (max_part > 0)
+       if (max_part > 0) {
                part_shift = fls(max_part);
 
+               /*
+                * Adjust max_part according to part_shift as it is exported
+                * to user space so that users can know the maximum number of
+                * partitions the kernel can manage.
+                *
+                * Note that -1 is required because partition 0 is reserved
+                * for the whole disk.
+                */
+               max_part = (1UL << part_shift) - 1;
+       }
+
+       if ((1UL << part_shift) > DISK_MAX_PARTS)
+               return -EINVAL;
+
+       if (nbds_max > 1UL << (MINORBITS - part_shift))
+               return -EINVAL;
+
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
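
The nbd_init() changes budget the minor-number space between devices and partitions. Below is a standalone worked example of the same arithmetic; MINORBITS and DISK_MAX_PARTS are restated with the kernel's usual values, and max_part/nbds_max are sample module parameters.

#include <stdio.h>

#define MINORBITS       20
#define DISK_MAX_PARTS  256

static int fls_(unsigned int x)         /* portable stand-in for the kernel's fls() */
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int max_part = 15;     /* example module parameter */
        unsigned int nbds_max = 16;
        int part_shift = 0;

        if (max_part > 0) {
                part_shift = fls_(max_part);
                max_part = (1UL << part_shift) - 1;     /* slot 0 is the whole disk */
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS ||
            nbds_max > (1UL << (MINORBITS - part_shift)))
                return 1;               /* module load would fail with -EINVAL */

        printf("part_shift=%d effective max_part=%u device limit=%lu\n",
               part_shift, max_part, 1UL << (MINORBITS - part_shift));
        return 0;
}
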
index c73910cc28c919bfdcdc6f1972af1f1f95822138..5cf2993a8338c9e325fb6126d5a6ec3ca371f6e3 100644 (file)
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
  failed_init:
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
-       for (i = 0; i < mmap_pages; i++) {
-               if (blkbk->pending_pages[i])
-                       __free_page(blkbk->pending_pages[i]);
+       if (blkbk->pending_pages) {
+               for (i = 0; i < mmap_pages; i++) {
+                       if (blkbk->pending_pages[i])
+                               __free_page(blkbk->pending_pages[i]);
+               }
+               kfree(blkbk->pending_pages);
        }
-       kfree(blkbk->pending_pages);
        kfree(blkbk);
        blkbk = NULL;
        return rc;
index 34570823355be5715c35bc86eabfc33d7ea41b83..6cc0db1bf52252f2a981544116bf419d2a6a28e6 100644 (file)
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
        }
 
        vbd->bdev = bdev;
-       vbd->size = vbd_sz(vbd);
-
        if (vbd->bdev->bd_disk == NULL) {
                DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
                        vbd->pdevice);
                xen_vbd_free(vbd);
                return -ENOENT;
        }
+       vbd->size = vbd_sz(vbd);
 
        if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
                vbd->type |= VDISK_CDROM;
index b3f01996318f04c0f432a38112226dbbeba0efcf..48ad2a7ab080ba9a58c425da7cc959dba0e1e3de 100644 (file)
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
  *             flags        pointer to flags for data
  *             count        count of received data in bytes
  *     
- * Return Value:    Number of bytes received
+ * Return Value:    None
  */
-static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
-               const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
 {
        struct hci_uart *hu = (void *)tty->disc_data;
-       int received;
 
        if (!hu || tty != hu->tty)
-               return -ENODEV;
+               return;
 
        if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
-               return -EINVAL;
+               return;
 
        spin_lock(&hu->rx_lock);
-       received = hu->proto->recv(hu, (void *) data, count);
-       if (received > 0)
-               hu->hdev->stat.byte_rx += received;
+       hu->proto->recv(hu, (void *) data, count);
+       hu->hdev->stat.byte_rx += count;
        spin_unlock(&hu->rx_lock);
 
        tty_unthrottle(tty);
-
-       return received;
 }
 
 static int hci_uart_register_dev(struct hci_uart *hu)
index 051474c65b783c5e9f3315de2fb3732516810027..34d6a1cab8def622d95c46b138b02a7d8a0eeeec 100644 (file)
@@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
         * This has the effect of treating non-periodic like periodic.
         */
        if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
-               unsigned long m, t;
+               unsigned long m, t, mc, base, k;
+               struct hpet __iomem *hpet = devp->hd_hpet;
+               struct hpets *hpetp = devp->hd_hpets;
 
                t = devp->hd_ireqfreq;
                m = read_counter(&devp->hd_timer->hpet_compare);
-               write_counter(t + m, &devp->hd_timer->hpet_compare);
+               mc = read_counter(&hpet->hpet_mc);
+               /* The time for the next interrupt would logically be t + m,
+                * however, if we are very unlucky and the interrupt is delayed
+                * for longer than t then we will completely miss the next
+                * interrupt if we set t + m and an application will hang.
+                * Therefore we need to make a more complex computation assuming
+                * that there exists a k for which the following is true:
+                * k * t + base < mc + delta
+                * (k + 1) * t + base > mc + delta
+                * where t is the interval in hpet ticks for the given freq,
+                * base is the theoretical start value 0 < base < t,
+                * mc is the main counter value at the time of the interrupt,
+                * delta is the time it takes to write a value to the
+                * comparator.
+                * k may then be computed as (mc - base + delta) / t .
+                */
+               base = mc % t;
+               k = (mc - base + hpetp->hp_delta) / t;
+               write_counter(t * (k + 1) + base,
+                             &devp->hd_timer->hpet_compare);
        }
 
        if (devp->hd_flags & HPET_SHARED_IRQ)
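
The catch-up logic added above is plain integer arithmetic. A minimal standalone sketch of the same computation follows; the tick period, counter value and write latency are invented example numbers, not hardware data:

    /* Illustrative only: t, mc and delta are made-up values. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long t = 1000;    /* hpet ticks per interrupt period */
            unsigned long mc = 12345;  /* main counter read in the handler */
            unsigned long delta = 20;  /* cost of writing the comparator */

            unsigned long base = mc % t;               /* 0 <= base < t */
            unsigned long k = (mc - base + delta) / t;
            unsigned long next = t * (k + 1) + base;   /* next interrupt point */

            printf("next compare value: %lu\n", next); /* prints 13345 */
            return 0;
    }

Even if the interrupt was serviced late, the value written back always lands ahead of the main counter, so the interrupt stream keeps going instead of stalling until the counter wraps.
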
index 036e5865eb40005f026f571fd679a7db09bd4859..dc7c033ef587142ce080e33a2b2711a94ece1d89 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
        int ret;
 
-       /* wake up device and enable clock */
-       pm_runtime_get_sync(&p->pdev->dev);
+       /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
-               pm_runtime_put_sync(&p->pdev->dev);
                return ret;
        }
 
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
        /* disable interrupts in CMT block */
        sh_cmt_write(p, CMCSR, 0);
 
-       /* stop clock and mark device as idle */
+       /* stop clock */
        clk_disable(p->clk);
-       pm_runtime_put_sync(&p->pdev->dev);
 }
 
 /* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               pm_runtime_enable(&pdev->dev);
                return 0;
        }
 
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
                kfree(p);
                platform_set_drvdata(pdev, NULL);
        }
-
-       if (!is_early_platform_device(pdev))
-               pm_runtime_enable(&pdev->dev);
        return ret;
 }
 
index 17296288a2052ef757a5e22b6a0d3e52d37036ee..80813576861781194f235f52507480e1d44a92f7 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
        int ret;
 
-       /* wake up device and enable clock */
-       pm_runtime_get_sync(&p->pdev->dev);
+       /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
-               pm_runtime_put_sync(&p->pdev->dev);
                return ret;
        }
 
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
        /* disable interrupts in TMU block */
        sh_tmu_write(p, TCR, 0x0000);
 
-       /* stop clock and mark device as idle */
+       /* stop clock */
        clk_disable(p->clk);
-       pm_runtime_put_sync(&p->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               pm_runtime_enable(&pdev->dev);
                return 0;
        }
 
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
                kfree(p);
                platform_set_drvdata(pdev, NULL);
        }
-
-       if (!is_early_platform_device(pdev))
-               pm_runtime_enable(&pdev->dev);
        return ret;
 }
 
index b60a4c263686ab56693c5df6993e084e8f9f7e29..faf7c521784874c0dbbce7a65b9b05d3ea379f2f 100644 (file)
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);
 
-       cpufreq_stats_update(freq->cpu);
-       if (old_index == new_index)
+       /* We can't do stat->time_in_state[-1]= .. */
+       if (old_index == -1 || new_index == -1)
                return 0;
 
-       if (old_index == -1 || new_index == -1)
+       cpufreq_stats_update(freq->cpu);
+
+       if (old_index == new_index)
                return 0;
 
        spin_lock(&cpufreq_stats_lock);
@@ -387,6 +389,7 @@ static void __exit cpufreq_stats_exit(void)
        unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu) {
                cpufreq_stats_free_table(cpu);
+               cpufreq_stats_free_sysfs(cpu);
        }
 }
 
index 83479b6fb9a14fc9ef9826c18274560951c45f19..bce576d7478ed41f9b69ac727cc5d143d850bb83 100644 (file)
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        }
 
        res = transition_fid_vid(data, fid, vid);
+       if (res)
+               return res;
+
        freqs.new = find_khz_freq_from_fid(data->currfid);
 
        for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
        /* get MSR index for hardware pstate transition */
        pstate = index & HW_PSTATE_MASK;
        if (pstate > data->max_hw_pstate)
-               return 0;
+               return -EINVAL;
+
        freqs.old = find_khz_freq_from_pstate(data->powernow_table,
                        data->currpstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
index 636e40925b165e544618692fce8640503ddd65a6..02833004420151fac42fdbb07e6cebd86d29df70 100644 (file)
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 
                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
-       } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+       } else {
                dmae_init(sh_chan);
        }
 
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        /* platform data */
        shdev->pdata = pdata;
 
+       platform_set_drvdata(pdev, shdev);
+
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
@@ -1219,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+                               if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
+                                       irq_cap = 1;
+                                       break;
+                               }
+
                                if ((errirq_res->flags & IORESOURCE_BITS) ==
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1228,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
                                chan_irq[irq_cnt++] = i;
-
-                               if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
-                                       break;
                        }
 
-                       if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
-                               irq_cap = 1;
+                       if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
                                break;
-                       }
+
                        chanirq_res = platform_get_resource(pdev,
                                                IORESOURCE_IRQ, ++irqres);
                } while (irq_cnt < pdata->channel_num && chanirq_res);
@@ -1256,7 +1259,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
-       platform_set_drvdata(pdev, shdev);
        dma_async_device_register(&shdev->common);
 
        return err;
@@ -1278,6 +1280,8 @@ rst_err:
 
        if (dmars)
                iounmap(shdev->dmars);
+
+       platform_set_drvdata(pdev, NULL);
 emapdmars:
        iounmap(shdev->chan_reg);
        synchronize_rcu();
@@ -1316,6 +1320,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
                iounmap(shdev->dmars);
        iounmap(shdev->chan_reg);
 
+       platform_set_drvdata(pdev, NULL);
+
        synchronize_rcu();
        kfree(shdev);
 
index f032e446fc11bb8a0c943e519b0987c8cdd02d01..bfe723266fd89bb84726d88cf15c206c020ee707 100644 (file)
@@ -108,7 +108,9 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
+#ifdef CONFIG_ACPI
        int i;
+#endif
        ibft_addr = NULL;
 
 #ifdef CONFIG_ACPI
index 4a7f6314345506c1b347c505ad15783c61b7f17b..2967002a9f820b2894dd1f8f1110a47a9694e250 100644 (file)
@@ -87,32 +87,20 @@ config GPIO_IT8761E
          Say yes here to support GPIO functionality of IT8761E super I/O chip.
 
 config GPIO_EXYNOS4
-       bool "Samsung Exynos4 GPIO library support"
-       default y if CPU_EXYNOS4210
-       depends on ARM
-       help
-         Say yes here to support Samsung Exynos4 series SoCs GPIO library
+       def_bool y
+       depends on CPU_EXYNOS4210
 
 config GPIO_PLAT_SAMSUNG
-       bool "Samsung SoCs GPIO library support"
-       default y if SAMSUNG_GPIOLIB_4BIT
-       depends on ARM
-       help
-         Say yes here to support Samsung SoCs GPIO library
+       def_bool y
+       depends on SAMSUNG_GPIOLIB_4BIT
 
 config GPIO_S5PC100
-       bool "Samsung S5PC100 GPIO library support"
-       default y if CPU_S5PC100
-       depends on ARM
-       help
-         Say yes here to support Samsung S5PC100 SoCs GPIO library
+       def_bool y
+       depends on CPU_S5PC100
 
 config GPIO_S5PV210
-       bool "Samsung S5PV210/S5PC110 GPIO library support"
-       default y if CPU_S5PV210
-       depends on ARM
-       help
-         Say yes here to support Samsung S5PV210/S5PC110 SoCs GPIO library
+       def_bool y
+       depends on CPU_S5PV210
 
 config GPIO_PL061
        bool "PrimeCell PL061 GPIO support"
index d54ca6adb660be5c92a320c829a4361492ef226e..9029835112e7e0989321a346f95d89c3da63087d 100644 (file)
 #include <plat/gpio-cfg.h>
 #include <plat/gpio-cfg-helpers.h>
 
+int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
+                               unsigned int off, s3c_gpio_pull_t pull)
+{
+       if (pull == S3C_GPIO_PULL_UP)
+               pull = 3;
+
+       return s3c_gpio_setpull_updown(chip, off, pull);
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
+                                               unsigned int off)
+{
+       s3c_gpio_pull_t pull;
+
+       pull = s3c_gpio_getpull_updown(chip, off);
+       if (pull == 3)
+               pull = S3C_GPIO_PULL_UP;
+
+       return pull;
+}
+
 static struct s3c_gpio_cfg gpio_cfg = {
        .set_config     = s3c_gpio_setcfg_s3c64xx_4bit,
-       .set_pull       = s3c_gpio_setpull_updown,
-       .get_pull       = s3c_gpio_getpull_updown,
+       .set_pull       = s3c_gpio_setpull_exynos4,
+       .get_pull       = s3c_gpio_getpull_exynos4,
 };
 
 static struct s3c_gpio_cfg gpio_cfg_noint = {
        .set_config     = s3c_gpio_setcfg_s3c64xx_4bit,
-       .set_pull       = s3c_gpio_setpull_updown,
-       .get_pull       = s3c_gpio_getpull_updown,
+       .set_pull       = s3c_gpio_setpull_exynos4,
+       .get_pull       = s3c_gpio_getpull_exynos4,
 };
 
 /*
index 4961ef9bc1533777d0f4bb36adbbae2e51f84ed6..2c212c732d76e5e0996fb90a354f33e383ab7297 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2008,2009 STMicroelectronics
  * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
  *   Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -49,6 +50,7 @@ struct nmk_gpio_chip {
        u32 (*get_secondary_status)(unsigned int bank);
        void (*set_ioforce)(bool enable);
        spinlock_t lock;
+       bool sleepmode;
        /* Keep track of configured edges */
        u32 edge_rising;
        u32 edge_falling;
@@ -393,14 +395,25 @@ EXPORT_SYMBOL(nmk_config_pins_sleep);
  * @gpio: pin number
  * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE,
  *
- * Sets the sleep mode of a pin.  If @mode is NMK_GPIO_SLPM_INPUT, the pin is
- * changed to an input (with pullup/down enabled) in sleep and deep sleep.  If
- * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
- * configured even when in sleep and deep sleep.
+ * This register is actually in the pinmux layer, not the GPIO block itself.
+ * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
+ * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
+ * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
+ * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
+ * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
+ * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
  *
- * On DB8500v2 onwards, this setting loses the previous meaning and instead
- * indicates if wakeup detection is enabled on the pin.  Note that
- * enable_irq_wake() will automatically enable wakeup detection.
+ * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
+ * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
+ * entered) regardless of the altfunction selected. Also wake-up detection is
+ * ENABLED.
+ *
+ * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
+ * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
+ * (for altfunction GPIO) or respective on-chip peripherals (for other
+ * altfuncs) when IOFORCE is HIGH. Also wake-up detection is DISABLED.
+ *
+ * Note that enable_irq_wake() will automatically enable wakeup detection.
  */
 int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
 {
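
Read together, the rewritten kernel-doc above amounts to a small usage recipe: pick NMK_GPIO_SLPM_INPUT for pins that should wake the system, NMK_GPIO_SLPM_NOCHANGE for pins that should stay under their normal control. A hedged sketch of how platform code might apply this during suspend preparation (the hook and the two pin macros are hypothetical; only nmk_gpio_set_slpm() and the two modes come from the driver):

    /* Hypothetical board hook; WAKE_PIN and LCD_PIN are invented names. */
    static void board_prepare_sleep(void)
    {
            /* Forced to GPIO mode while IOFORCE is high, wake-up detection on. */
            nmk_gpio_set_slpm(WAKE_PIN, NMK_GPIO_SLPM_INPUT);

            /* Left under its normal control, wake-up detection off. */
            nmk_gpio_set_slpm(LCD_PIN, NMK_GPIO_SLPM_NOCHANGE);
    }
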
@@ -551,6 +564,12 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
 static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
                                int gpio, bool on)
 {
+       if (nmk_chip->sleepmode) {
+               __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
+                                   on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
+                                   : NMK_GPIO_SLPM_WAKEUP_DISABLE);
+       }
+
        __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
 }
 
@@ -901,7 +920,7 @@ void nmk_gpio_wakeups_suspend(void)
                writel(chip->fwimsc & chip->real_wake,
                       chip->addr + NMK_GPIO_FWIMSC);
 
-               if (cpu_is_u8500v2()) {
+               if (chip->sleepmode) {
                        chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
 
                        /* 0 -> wakeup enable */
@@ -923,7 +942,7 @@ void nmk_gpio_wakeups_resume(void)
                writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
                writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
 
-               if (cpu_is_u8500v2())
+               if (chip->sleepmode)
                        writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
        }
 }
@@ -1010,6 +1029,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
        nmk_chip->secondary_parent_irq = secondary_irq;
        nmk_chip->get_secondary_status = pdata->get_secondary_status;
        nmk_chip->set_ioforce = pdata->set_ioforce;
+       nmk_chip->sleepmode = pdata->supports_sleepmode;
        spin_lock_init(&nmk_chip->lock);
 
        chip = &nmk_chip->chip;
@@ -1065,5 +1085,3 @@ core_initcall(nmk_gpio_init);
 MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
 MODULE_DESCRIPTION("Nomadik GPIO Driver");
 MODULE_LICENSE("GPL");
-
-
index 6c51191da567e1fba91e774573f08f087e90daee..35bebde23e835c7674aaaa5e6bbf0e998ab0fd32 100644 (file)
@@ -432,7 +432,6 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
 {
        void __iomem *base = bank->base;
        u32 gpio_bit = 1 << gpio;
-       u32 val;
 
        if (cpu_is_omap44xx()) {
                MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
@@ -455,15 +454,8 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
        }
        if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
                if (cpu_is_omap44xx()) {
-                       if (trigger != 0)
-                               __raw_writel(1 << gpio, bank->base+
-                                               OMAP4_GPIO_IRQWAKEN0);
-                       else {
-                               val = __raw_readl(bank->base +
-                                                       OMAP4_GPIO_IRQWAKEN0);
-                               __raw_writel(val & (~(1 << gpio)), bank->base +
-                                                        OMAP4_GPIO_IRQWAKEN0);
-                       }
+                       MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+                               trigger != 0);
                } else {
                        /*
                         * GPIO wakeup request can only be generated on edge
@@ -477,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
                                        + OMAP24XX_GPIO_CLEARWKUENA);
                }
        }
-       /* This part needs to be executed always for OMAP34xx */
-       if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+       /* This part needs to be executed always for OMAP{34xx, 44xx} */
+       if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
+                       (bank->non_wakeup_gpios & gpio_bit)) {
                /*
                 * Log the edge gpio and manually trigger the IRQ
                 * after resume if the input level changes
@@ -1134,8 +1127,11 @@ static void gpio_irq_shutdown(struct irq_data *d)
 {
        unsigned int gpio = d->irq - IH_GPIO_BASE;
        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+       unsigned long flags;
 
+       spin_lock_irqsave(&bank->lock, flags);
        _reset_gpio(bank, gpio);
+       spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void gpio_ack_irq(struct irq_data *d)
@@ -1150,9 +1146,12 @@ static void gpio_mask_irq(struct irq_data *d)
 {
        unsigned int gpio = d->irq - IH_GPIO_BASE;
        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+       unsigned long flags;
 
+       spin_lock_irqsave(&bank->lock, flags);
        _set_gpio_irqenable(bank, gpio, 0);
        _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+       spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void gpio_unmask_irq(struct irq_data *d)
@@ -1161,7 +1160,9 @@ static void gpio_unmask_irq(struct irq_data *d)
        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
        unsigned int irq_mask = 1 << get_gpio_index(gpio);
        u32 trigger = irqd_get_trigger_type(d);
+       unsigned long flags;
 
+       spin_lock_irqsave(&bank->lock, flags);
        if (trigger)
                _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
 
@@ -1173,6 +1174,7 @@ static void gpio_unmask_irq(struct irq_data *d)
        }
 
        _set_gpio_irqenable(bank, gpio, 1);
+       spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static struct irq_chip gpio_irq_chip = {
@@ -1524,7 +1526,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
        }
 }
 
-static void __init omap_gpio_chip_init(struct gpio_bank *bank)
+static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
 {
        int j;
        static int gpio;
index 3e257a50bf56f447563318d86bff3f6b2650c517..61e1ef90d4e5190cc9ca515f5f043b55a0465c5f 100644 (file)
@@ -46,10 +46,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
        list_for_each_entry(entry, &dev->maplist, head) {
                /*
                 * Because the kernel-userspace ABI is fixed at a 32-bit offset
-                * while PCI resources may live above that, we ignore the map
-                * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
-                * It is assumed that each driver will have only one resource of
-                * each type.
+                * while PCI resources may live above that, we only compare the
+                * lower 32 bits of the map offset for maps of type
+                * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+                * It is assumed that if a driver has more than one resource
+                * of each type, the lower 32 bits are different.
                 */
                if (!entry->map ||
                    map->type != entry->map->type ||
@@ -59,9 +60,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
                case _DRM_SHM:
                        if (map->flags != _DRM_CONTAINS_LOCK)
                                break;
+                       return entry;
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
-                       return entry;
+                       if ((entry->map->offset & 0xffffffff) ==
+                           (map->offset & 0xffffffff))
+                               return entry;
                default: /* Make gcc happy */
                        ;
                }
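
The relaxed match added above can be exercised with two made-up offsets that differ only above bit 31; a quick userspace check of the same masking:

    /* Illustrative only: both addresses are invented. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t a = 0x00000000fd000000ULL;
            uint64_t b = 0x00000001fd000000ULL;

            /* Same test as drm_find_matching_map(): compare offsets modulo 2^32. */
            bool same = (a & 0xffffffff) == (b & 0xffffffff);

            printf("treated as the same map: %s\n", same ? "yes" : "no"); /* yes */
            return 0;
    }

So two register or framebuffer maps are considered identical whenever their low 32 bits collide, which is exactly the assumption the new comment spells out.
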
@@ -182,9 +186,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                        kfree(map);
                        return -EINVAL;
                }
-#endif
-#ifdef __alpha__
-               map->offset += dev->hose->mem_space->start;
 #endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
index 872747c5a544a08ffb57c1e0b55578b9d3b3596f..21058e6ad2b80ccbc10be90795402701270d1585 100644 (file)
@@ -1113,7 +1113,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        if (card_res->count_fbs >= fb_count) {
                copied = 0;
                fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
-               list_for_each_entry(fb, &file_priv->fbs, head) {
+               list_for_each_entry(fb, &file_priv->fbs, filp_head) {
                        if (put_user(fb->base.id, fb_id + copied)) {
                                ret = -EFAULT;
                                goto out;
index 0a9357c66ff8703b76123487c8ef8d05b373c136..09292193dafe0b28466e37061acc7bee25e348f5 100644 (file)
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
 
 bad:
        if (raw_edid) {
-               DRM_ERROR("Raw EDID:\n");
+               printk(KERN_ERR "Raw EDID:\n");
                print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
-               printk("\n");
+               printk(KERN_ERR "\n");
        }
        return 0;
 }
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
        return ret == 2 ? 0 : -1;
 }
 
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+       int i;
+       u32 *raw_edid = (u32 *)in_edid;
+
+       for (i = 0; i < length / 4; i++)
+               if (*(raw_edid + i) != 0)
+                       return false;
+       return true;
+}
+
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
@@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
                        goto out;
                if (drm_edid_block_valid(block))
                        break;
+               if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+                       connector->null_edid_counter++;
+                       goto carp;
+               }
        }
        if (i == 4)
                goto carp;
index d61d185cf040499232d508d4daa66e2f85113fdf..4a058c7af6c096f6dd69dd533701a9adb3091b09 100644 (file)
@@ -28,6 +28,7 @@
  * IN THE SOFTWARE.
  */
 #include <linux/compat.h>
+#include <linux/ratelimit.h>
 
 #include "drmP.h"
 #include "drm_core.h"
@@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
                return -EFAULT;
 
        m32.handle = (unsigned long)handle;
-       if (m32.handle != (unsigned long)handle && printk_ratelimit())
-               printk(KERN_ERR "compat_drm_addmap truncated handle"
-                      " %p for type %d offset %x\n",
-                      handle, m32.type, m32.offset);
+       if (m32.handle != (unsigned long)handle)
+               printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
+                                  " %p for type %d offset %x\n",
+                                  handle, m32.type, m32.offset);
 
        if (copy_to_user(argp, &m32, sizeof(m32)))
                return -EFAULT;
index e1aee4f6a7c69dacdc59ff2a057340ee914bc4b5..b6a19cb07cafc3591f2ff946532851f63a14270d 100644 (file)
@@ -251,7 +251,7 @@ err:
 }
 
 
-int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
 {
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
@@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
        .get_name = drm_pci_get_name,
        .set_busid = drm_pci_set_busid,
        .set_unique = drm_pci_set_unique,
+       .irq_by_busid = drm_pci_irq_by_busid,
        .agp_init = drm_pci_agp_init,
 };
 
index 2c3fcbdfd8ff64f8c5a53ff35884f59ce51bd621..5db96d45fc71677fcf0b6432e0f32dfd22b774d4 100644 (file)
@@ -526,7 +526,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
 {
 #ifdef __alpha__
-       return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+       return dev->hose->dense_mem_base;
 #else
        return 0;
 #endif
index 51c2257b11e6d7cfb990b6a667b8fa00e59dd92e..4d46441cbe2d830de73ffacdb5821dbdd11f6340 100644 (file)
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 
-       for (i = 0; i < 16; i++)
+       for (i = 0; i < dev_priv->num_fence_regs; i++)
                seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
        if (error->active_bo)
index ee660355ae6839839713ae3ecca369cf60f81a6d..f63ee162f1245dcffc5844c6c401df7ce9b885c8 100644 (file)
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
        struct intel_fbdev *fbdev;
 
        struct drm_property *broadcast_rgb_property;
+       struct drm_property *force_audio_property;
 
        atomic_t forcewake_count;
 } drm_i915_private_t;
@@ -909,13 +910,6 @@ struct drm_i915_file_private {
        } mm;
 };
 
-enum intel_chip_family {
-       CHIP_I8XX = 0x01,
-       CHIP_I9XX = 0x02,
-       CHIP_I915 = 0x04,
-       CHIP_I965 = 0x08,
-};
-
 #define INTEL_INFO(dev)        (((struct drm_i915_private *) (dev)->dev_private)->info)
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
index 0b2e167d2bce856c9b9ef55ea73e22174abf7d2f..94c84d7441007f029ebad7b7d87eb158de5f971a 100644 (file)
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_offset = offset & (PAGE_SIZE-1);
+               page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                 * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
-               shmem_page_offset = offset & ~PAGE_MASK;
+               shmem_page_offset = offset_in_page(offset);
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-               data_page_offset = data_ptr & ~PAGE_MASK;
+               data_page_offset = offset_in_page(data_ptr);
 
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -465,8 +465,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 
                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
-               if (IS_ERR(page))
-                       return PTR_ERR(page);
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       goto out;
+               }
 
                if (do_bit17_swizzling) {
                        slow_shmem_bit17_copy(page,
@@ -638,8 +640,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_base = (offset & ~(PAGE_SIZE-1));
-               page_offset = offset & (PAGE_SIZE-1);
+               page_base = offset & PAGE_MASK;
+               page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
@@ -650,7 +652,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 */
                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                    page_offset, user_data, page_length))
-
                        return -EFAULT;
 
                remain -= page_length;
@@ -730,9 +731,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
-               gtt_page_offset = offset & ~PAGE_MASK;
+               gtt_page_offset = offset_in_page(offset);
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-               data_page_offset = data_ptr & ~PAGE_MASK;
+               data_page_offset = offset_in_page(data_ptr);
 
                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,7 +792,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_offset = offset & (PAGE_SIZE-1);
+               page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
@@ -896,9 +897,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                 * data_page_offset = offset with data_page_index page.
                 * page_length = bytes to copy for this page
                 */
-               shmem_page_offset = offset & ~PAGE_MASK;
+               shmem_page_offset = offset_in_page(offset);
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-               data_page_offset = data_ptr & ~PAGE_MASK;
+               data_page_offset = offset_in_page(data_ptr);
 
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -1450,8 +1451,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
         * edge of an even tile row (where tile rows are counted as if the bo is
         * placed in a fenced gtt region).
         */
-       if (IS_GEN2(dev) ||
-           (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+       if (IS_GEN2(dev))
+               tile_height = 16;
+       else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                tile_height = 32;
        else
                tile_height = 8;
index b79619a7b78880f9823ba87de0ec888608630515..9e34a1abeb61b8d43d49553d5fb2299a4429a026 100644 (file)
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
        if (de_iir & DE_PIPEA_VBLANK_IVB)
                drm_handle_vblank(dev, 0);
 
-       if (de_iir & DE_PIPEB_VBLANK_IVB);
+       if (de_iir & DE_PIPEB_VBLANK_IVB)
                drm_handle_vblank(dev, 1);
 
        /* check event from PCH */
@@ -1740,6 +1740,16 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
+       if (IS_GEN6(dev)) {
+               /* Work around stalls observed on Sandy Bridge GPUs by
+                * making the blitter command streamer generate a
+                * write to the Hardware Status Page for
+                * MI_USER_INTERRUPT.  This appears to serialize the
+                * previous seqno write out before the interrupt
+                * happens.
+                */
+               I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+       }
 
        /* XXX hotplug from PCH */
 
index e93f93cc7e78725b4fbe52ba4e5c0b2f522a3768..0979d8877880d11acacd692de840660e6e60022c 100644 (file)
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                 * This may be a DVI-I connector with a shared DDC
                 * link between analog and digital outputs, so we
                 * have to check the EDID input spec of the attached device.
+                *
+                * On the other hand, what should we do if it is a broken EDID?
                 */
                if (edid != NULL) {
                        is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                if (!is_digital) {
                        DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
                        return true;
+               } else {
+                       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                }
        }
 
index f553ddfdc16819fa343b4780c27de14fa37d1337..81a9059b6a94ea0278bd691342294f4c0893cf21 100644 (file)
@@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
 #define ILK_LP0_PLANE_LATENCY          700
 #define ILK_LP0_CURSOR_LATENCY         1300
 
-static bool ironlake_compute_wm0(struct drm_device *dev,
-                                int pipe,
-                                const struct intel_watermark_params *display,
-                                int display_latency_ns,
-                                const struct intel_watermark_params *cursor,
-                                int cursor_latency_ns,
-                                int *plane_wm,
-                                int *cursor_wm)
-{
-       struct drm_crtc *crtc;
-       int htotal, hdisplay, clock, pixel_size;
-       int line_time_us, line_count;
-       int entries, tlb_miss;
-
-       crtc = intel_get_crtc_for_pipe(dev, pipe);
-       if (crtc->fb == NULL || !crtc->enabled)
-               return false;
-
-       htotal = crtc->mode.htotal;
-       hdisplay = crtc->mode.hdisplay;
-       clock = crtc->mode.clock;
-       pixel_size = crtc->fb->bits_per_pixel / 8;
-
-       /* Use the small buffer method to calculate plane watermark */
-       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
-       tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
-       if (tlb_miss > 0)
-               entries += tlb_miss;
-       entries = DIV_ROUND_UP(entries, display->cacheline_size);
-       *plane_wm = entries + display->guard_size;
-       if (*plane_wm > (int)display->max_wm)
-               *plane_wm = display->max_wm;
-
-       /* Use the large buffer method to calculate cursor watermark */
-       line_time_us = ((htotal * 1000) / clock);
-       line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-       entries = line_count * 64 * pixel_size;
-       tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
-       if (tlb_miss > 0)
-               entries += tlb_miss;
-       entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
-       *cursor_wm = entries + cursor->guard_size;
-       if (*cursor_wm > (int)cursor->max_wm)
-               *cursor_wm = (int)cursor->max_wm;
-
-       return true;
-}
-
 /*
  * Check the wm result.
  *
@@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (ironlake_compute_wm0(dev, 0,
-                                &ironlake_display_wm_info,
-                                ILK_LP0_PLANE_LATENCY,
-                                &ironlake_cursor_wm_info,
-                                ILK_LP0_CURSOR_LATENCY,
-                                &plane_wm, &cursor_wm)) {
+       if (g4x_compute_wm0(dev, 0,
+                           &ironlake_display_wm_info,
+                           ILK_LP0_PLANE_LATENCY,
+                           &ironlake_cursor_wm_info,
+                           ILK_LP0_CURSOR_LATENCY,
+                           &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
                enabled |= 1;
        }
 
-       if (ironlake_compute_wm0(dev, 1,
-                                &ironlake_display_wm_info,
-                                ILK_LP0_PLANE_LATENCY,
-                                &ironlake_cursor_wm_info,
-                                ILK_LP0_CURSOR_LATENCY,
-                                &plane_wm, &cursor_wm)) {
+       if (g4x_compute_wm0(dev, 1,
+                           &ironlake_display_wm_info,
+                           ILK_LP0_PLANE_LATENCY,
+                           &ironlake_cursor_wm_info,
+                           ILK_LP0_CURSOR_LATENCY,
+                           &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (ironlake_compute_wm0(dev, 0,
-                                &sandybridge_display_wm_info, latency,
-                                &sandybridge_cursor_wm_info, latency,
-                                &plane_wm, &cursor_wm)) {
+       if (g4x_compute_wm0(dev, 0,
+                           &sandybridge_display_wm_info, latency,
+                           &sandybridge_cursor_wm_info, latency,
+                           &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
                enabled |= 1;
        }
 
-       if (ironlake_compute_wm0(dev, 1,
-                                &sandybridge_display_wm_info, latency,
-                                &sandybridge_cursor_wm_info, latency,
-                                &plane_wm, &cursor_wm)) {
+       if (g4x_compute_wm0(dev, 1,
+                           &sandybridge_display_wm_info, latency,
+                           &sandybridge_cursor_wm_info, latency,
+                           &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -7675,6 +7627,7 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;
+               dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_G4X(dev)) {
                dev_priv->display.update_wm = g4x_update_wm;
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
index a4d80314e7f8cf5edb13bce97328a60f1f26f6ce..391b55f1cc7496e2e313d77332fafb35ecd4aa15 100644 (file)
@@ -59,8 +59,6 @@ struct intel_dp {
        bool is_pch_edp;
        uint8_t train_set[4];
        uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-       struct drm_property *force_audio_property;
 };
 
 /**
@@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
        if (ret)
                return ret;
 
-       if (property == intel_dp->force_audio_property) {
+       if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;
 
@@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 static void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-
-       intel_dp->force_audio_property =
-               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-       if (intel_dp->force_audio_property) {
-               intel_dp->force_audio_property->values[0] = -1;
-               intel_dp->force_audio_property->values[1] = 1;
-               drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
-       }
-
+       intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
 }
 
index 831d7a4a0d18ce4eb52c86c76cacde8ac804f22e..9ffa61eb4d7efab156819980452e22e5507c3c76 100644 (file)
@@ -236,6 +236,7 @@ struct intel_unpin_work {
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
 extern void intel_crt_init(struct drm_device *dev);
index f289b86429762e7b644e3b454c7f8b1a0d345987..aa0a8e83142e1c8ee5562d0884ad92c8656b30ed 100644 (file)
@@ -45,7 +45,6 @@ struct intel_hdmi {
        bool has_hdmi_sink;
        bool has_audio;
        int force_audio;
-       struct drm_property *force_audio_property;
 };
 
 static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
        if (mode->clock > 165000)
                return MODE_CLOCK_HIGH;
        if (mode->clock < 20000)
-               return MODE_CLOCK_HIGH;
+               return MODE_CLOCK_LOW;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
@@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
        if (ret)
                return ret;
 
-       if (property == intel_hdmi->force_audio_property) {
+       if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;
 
@@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-
-       intel_hdmi->force_audio_property =
-               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-       if (intel_hdmi->force_audio_property) {
-               intel_hdmi->force_audio_property->values[0] = -1;
-               intel_hdmi->force_audio_property->values[1] = 1;
-               drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
-       }
-
+       intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
 }
 
index d3b903bce7c5b3a25845c570aaf21f064d416224..d98cee60b602650063225ce9fbbf4f01220de562 100644 (file)
@@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
                bus->reg0 = i | GMBUS_RATE_100KHZ;
 
                /* XXX force bit banging until GMBUS is fully debugged */
-               if (IS_GEN2(dev))
-                       bus->force_bit = intel_gpio_create(dev_priv, i);
+               bus->force_bit = intel_gpio_create(dev_priv, i);
        }
 
        intel_i2c_reset(dev_priv->dev);
index 67cb076d271b5eb00a4c87a9022f0957f3ff569e..b28f7bd9f88a1fca15fdb0ade73cb724658d1cf8 100644 (file)
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Asus EeeBox PC EB1007",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 9034dd8f33c75646eedceeba9d010e26f198ae5c..3b26a3ba02dd349c9715ad16768a1a06a52b7bdb 100644 (file)
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
        return ret;
 }
 
+static const char *force_audio_names[] = {
+       "off",
+       "auto",
+       "on",
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_property *prop;
+       int i;
+
+       prop = dev_priv->force_audio_property;
+       if (prop == NULL) {
+               prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                          "audio",
+                                          ARRAY_SIZE(force_audio_names));
+               if (prop == NULL)
+                       return;
+
+               for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+                       drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+               dev_priv->force_audio_property = prop;
+       }
+       drm_connector_attach_property(connector, prop, 0);
+}
+
 static const char *broadcast_rgb_names[] = {
        "Full",
        "Limited 16:235",
index 754086f83941148027633e001988ebdbef6ecda1..30fe554d8936a8cd965b083a0de9064f44865db6 100644 (file)
@@ -148,8 +148,6 @@ struct intel_sdvo_connector {
        int   format_supported_num;
        struct drm_property *tv_format;
 
-       struct drm_property *force_audio_property;
-
        /* add the property for the SDVO-TV */
        struct drm_property *left;
        struct drm_property *right;
@@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
        if (ret)
                return ret;
 
-       if (property == intel_sdvo_connector->force_audio_property) {
+       if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;
 
@@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
 {
        struct drm_device *dev = connector->base.base.dev;
 
-       connector->force_audio_property =
-               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
-       if (connector->force_audio_property) {
-               connector->force_audio_property->values[0] = -1;
-               connector->force_audio_property->values[1] = 1;
-               drm_connector_attach_property(&connector->base.base,
-                                             connector->force_audio_property, 0);
-       }
-
+       intel_attach_force_audio_property(&connector->base.base);
        if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
                intel_attach_broadcast_rgb_property(&connector->base.base);
 }
index 1084fa4d261b7c9d6638d965a5a4c401c9949720..54558a01969ae02d6875c99fafb605f27a717ce0 100644 (file)
@@ -195,29 +195,10 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 
 #define mga_flush_write_combine()      DRM_WRITEMEMORYBARRIER()
 
-#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE(reg)          ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR(reg)          (MGA_BASE(reg) + reg)
-
-#define MGA_DEREF(reg)         (*(volatile u32 *)MGA_ADDR(reg))
-#define MGA_DEREF8(reg)                (*(volatile u8 *)MGA_ADDR(reg))
-
-#define MGA_READ(reg)          (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8(reg)         (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE(reg, val)    do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
-#define MGA_WRITE8(reg, val)   do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
-
-static inline u32 _MGA_READ(u32 *addr)
-{
-       DRM_MEMORYBARRIER();
-       return *(volatile u32 *)addr;
-}
-#else
 #define MGA_READ8(reg)         DRM_READ8(dev_priv->mmio, (reg))
 #define MGA_READ(reg)          DRM_READ32(dev_priv->mmio, (reg))
 #define MGA_WRITE8(reg, val)   DRM_WRITE8(dev_priv->mmio, (reg), (val))
 #define MGA_WRITE(reg, val)    DRM_WRITE32(dev_priv->mmio, (reg), (val))
-#endif
 
 #define DWGREG0                0x1c00
 #define DWGREG0_END    0x1dff
index 053edf9d2f67db4c6ed86536ea08f89d8c5b8b51..ba896e54b799bfd43b92291358234197c4cd56b3 100644 (file)
@@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
        }
        /* NV11 and NV20 don't have this, they stop at 0x52. */
        if (nv_gf4_disp_arch(dev)) {
+               rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
 
@@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
                        nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
                }
 
+               wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
 
index 2960f583dc389fac112de1d4ad7ee615a0d6ca31..5ee14d216ce88474eb3d903b5b3519a69019d218 100644 (file)
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                        dma_bits = 40;
        } else
-       if (drm_pci_device_is_pcie(dev) &&
+       if (0 && drm_pci_device_is_pcie(dev) &&
            dev_priv->chipset  > 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                nouveau_vm_unmap(&node->tmp_vma);
                nouveau_vm_put(&node->tmp_vma);
        }
+
        mem->mm_node = NULL;
+       kfree(node);
 }
 
 static int
index c77111eca6acefc8ab31acb7bb060547b64b5f7a..82fad914e648441340a836bf306d761d09afc904 100644 (file)
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
-       if (drm_pci_device_is_pcie(dev) &&
+       if (0 && drm_pci_device_is_pcie(dev) &&
            dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
index 38ea662568c1062a6486426408ef5648265e360a..80218887e0a004fc19337f180a93fe4ecb4196d2 100644 (file)
@@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nv50_vram_flags_valid;
                break;
        case 0xC0:
+       case 0xD0:
                engine->instmem.init            = nvc0_instmem_init;
                engine->instmem.takedown        = nvc0_instmem_takedown;
                engine->instmem.suspend         = nvc0_instmem_suspend;
@@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_timer;
 
-       switch (dev_priv->card_type) {
-       case NV_04:
-               nv04_graph_create(dev);
-               break;
-       case NV_10:
-               nv10_graph_create(dev);
-               break;
-       case NV_20:
-       case NV_30:
-               nv20_graph_create(dev);
-               break;
-       case NV_40:
-               nv40_graph_create(dev);
-               break;
-       case NV_50:
-               nv50_graph_create(dev);
-               break;
-       case NV_C0:
-               nvc0_graph_create(dev);
-               break;
-       default:
-               break;
-       }
-
-       switch (dev_priv->chipset) {
-       case 0x84:
-       case 0x86:
-       case 0x92:
-       case 0x94:
-       case 0x96:
-       case 0xa0:
-               nv84_crypt_create(dev);
-               break;
-       }
+       if (!nouveau_noaccel) {
+               switch (dev_priv->card_type) {
+               case NV_04:
+                       nv04_graph_create(dev);
+                       break;
+               case NV_10:
+                       nv10_graph_create(dev);
+                       break;
+               case NV_20:
+               case NV_30:
+                       nv20_graph_create(dev);
+                       break;
+               case NV_40:
+                       nv40_graph_create(dev);
+                       break;
+               case NV_50:
+                       nv50_graph_create(dev);
+                       break;
+               case NV_C0:
+                       nvc0_graph_create(dev);
+                       break;
+               default:
+                       break;
+               }
 
-       switch (dev_priv->card_type) {
-       case NV_50:
                switch (dev_priv->chipset) {
-               case 0xa3:
-               case 0xa5:
-               case 0xa8:
-               case 0xaf:
-                       nva3_copy_create(dev);
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0xa0:
+                       nv84_crypt_create(dev);
                        break;
                }
-               break;
-       case NV_C0:
-               nvc0_copy_create(dev, 0);
-               nvc0_copy_create(dev, 1);
-               break;
-       default:
-               break;
-       }
 
-       if (dev_priv->card_type == NV_40)
-               nv40_mpeg_create(dev);
-       else
-       if (dev_priv->card_type == NV_50 &&
-           (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
-               nv50_mpeg_create(dev);
+               switch (dev_priv->card_type) {
+               case NV_50:
+                       switch (dev_priv->chipset) {
+                       case 0xa3:
+                       case 0xa5:
+                       case 0xa8:
+                       case 0xaf:
+                               nva3_copy_create(dev);
+                               break;
+                       }
+                       break;
+               case NV_C0:
+                       nvc0_copy_create(dev, 0);
+                       nvc0_copy_create(dev, 1);
+                       break;
+               default:
+                       break;
+               }
+
+               if (dev_priv->card_type == NV_40)
+                       nv40_mpeg_create(dev);
+               else
+               if (dev_priv->card_type == NV_50 &&
+                   (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
+                       nv50_mpeg_create(dev);
 
-       if (!nouveau_noaccel) {
                for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
                        if (dev_priv->eng[e]) {
                                ret = dev_priv->eng[e]->init(dev, e);
@@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
                dev_priv->card_type = NV_50;
                break;
        case 0xc0:
+       case 0xd0:
                dev_priv->card_type = NV_C0;
                break;
        default:
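
The new 0xd0 case folds Fermi-derived 0xdX chipsets into the existing NV_C0 card type. Judging from the case values, the switch is keyed on the high nibble of the chipset id; a reduced, purely illustrative sketch:

static int example_card_type(u8 chipset)
{
        switch (chipset & 0xf0) {
        case 0xc0:
        case 0xd0:                 /* newly covered Fermi derivatives */
                return NV_C0;
        default:
                return -EINVAL;    /* other generations elided here   */
        }
}
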
index 0059e6f58a8b642c9d88f00b7991a97dc68b1772..519a6b4bba466fce9a46577908b411e5cfdbbdfa 100644 (file)
@@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
+                               phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }
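
The one-line addition above is the whole fix: when the PTE index wraps into the next page directory entry, the source physical address must advance by the bytes already mapped, i.e. len PTEs of 2^(bits + 12) bytes each. A compressed sketch of that loop, with map_ptes() standing in for the per-chipset vm->map() hook (a hypothetical helper, not a real call):

static void example_map_linear(u64 phys, u32 pde, u32 pte,
                               u32 num, u32 max, u32 bits)
{
        while (num) {
                u32 len = (pte + num > max) ? (max - pte) : num;

                map_ptes(pde, pte, len, phys);        /* hypothetical helper   */
                num -= len;
                pte += len;
                if (pte == max) {                     /* spilled into next PDE */
                        phys += (u64)len << (bits + 12);
                        pde++;
                        pte = 0;
                }
        }
}

Without the phys update, every page table after the first would be filled starting from the original physical offset again, which is what the added line avoids.
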
index 3c78bc81357e0e838ec933e330748d9bd019f661..f1a3ae49199505d7ab1d0b187733a33a6bb6a0a2 100644 (file)
@@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
         */
 
        /* framebuffer can be larger than crtc scanout area. */
-       regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+       regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+               XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+       regp->CRTC[NV_CIO_CRE_42] =
+               XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
                                            MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
        regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
        regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
                XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+       regp->CRTC[NV_CIO_CRE_42] =
+               XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+       crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
 
        /* Update the framebuffer location. */
        regp->fb_start = nv_crtc->fb.offset & ~3;
index fe0f253089acf6e15de9d1dd93934fd23d315fdb..bbfb1a68fb1170d1a48f45d03f3ce08d38496f31 100644 (file)
 #              define NV_CIO_CRE_EBR_VDE_11            2:2
 #              define NV_CIO_CRE_EBR_VRS_11            4:4
 #              define NV_CIO_CRE_EBR_VBS_11            6:6
+#      define NV_CIO_CRE_42                    0x42
+#              define NV_CIO_CRE_42_OFFSET_11          6:6
 #      define NV_CIO_CRE_43                    0x43
 #      define NV_CIO_CRE_44                    0x44    /* head control */
 #      define NV_CIO_CRE_CSB                   0x45    /* colour saturation boost */
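
Taken together, the two hunks above give the CRTC pitch an eleventh bit: the pitch in 8-byte units keeps bits 7:0 in the ordinary offset register and bits 10:8 in CRE_RPC0 as before, while bit 11 now lands in bit 6 of the new CRE_42 register (the 6:6 field defined just above). Spelled out with plain shifts instead of the driver's XLATE macro, and with the destination bit positions inside CRE_RPC0 deliberately left out:

static void example_split_pitch(unsigned int pitch_bytes,
                                u8 *offs_7_0, u8 *offs_10_8, u8 *offs_11)
{
        unsigned int offset = pitch_bytes / 8;   /* pitch in 8-byte units  */

        *offs_7_0  = offset & 0xff;              /* -> CR OFFSET register  */
        *offs_10_8 = (offset >> 8) & 0x7;        /* -> CRE_RPC0            */
        *offs_11   = (offset >> 11) & 0x1;       /* -> CRE_42, bit 6       */
}

The extra bit simply extends the range of scanout pitches the CRTC registers can represent.
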
index 9746fee59f567856511b0eae1a2ed9310e3e52d0..ea92bbe3ed37002865f8850255a4e68859a0c8c7 100644 (file)
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
          The kernel will also perform security check on command stream
          provided by the user, we want to catch and forbid any illegal use
          of the GPU such as DMA into random system memory or into memory
-         not owned by the process supplying the command stream. This part
-         of the code is still incomplete and this why we propose that patch
-         as a staging driver addition, future security might forbid current
-         experimental userspace to run.
-
-         This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
-         (radeon up to X1950). Works is underway to provide support for R6XX,
-         R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
+         not owned by the process supplying the command stream.
index 49611e2365d984e539fe2f7d5169303e7e052f60..1b50ad8919d55e9e268095dcd54e824006ffd895 100644 (file)
@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
 #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
 #define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
 #define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP              0x14
 
 // ucConfig
 #define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK                             0x03
index ec848787d7d9ee260e8edd6642563f6f7436fe94..9541995e4b21df3d4ba5cb9b3ab496a3255b8034 100644 (file)
@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                                DISPPLL_CONFIG_DUAL_LINK;
                                        }
                                }
+                               if (radeon_encoder_is_dp_bridge(encoder)) {
+                                       struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+                                       struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+                                       args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
+                               } else
+                                       args.v3.sInput.ucExtTransmitterID = 0;
+
                                atom_execute_table(rdev->mode_info.atom_context,
                                                   index, (uint32_t *)&args);
                                adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
@@ -1045,7 +1052,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        uint64_t fb_location;
        uint32_t fb_format, fb_pitch_pixels, tiling_flags;
        u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
-       u32 tmp;
+       u32 tmp, viewport_w, viewport_h;
        int r;
 
        /* no fb bound */
@@ -1171,8 +1178,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        y &= ~1;
        WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
               (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
        WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
-              (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+              (viewport_w << 16) | viewport_h);
 
        /* pageflip setup */
        /* make sure flip is at vb rather than hb */
@@ -1213,7 +1222,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        uint64_t fb_location;
        uint32_t fb_format, fb_pitch_pixels, tiling_flags;
        u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
-       u32 tmp;
+       u32 tmp, viewport_w, viewport_h;
        int r;
 
        /* no fb bound */
@@ -1338,8 +1347,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        y &= ~1;
        WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
               (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
        WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
-              (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+              (viewport_w << 16) | viewport_h);
 
        /* pageflip setup */
        /* make sure flip is at vb rather than hb */
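
Both viewport hunks apply the same arithmetic: the programmed height is rounded up to an even number of lines before being packed into the VIEWPORT_SIZE register. A minimal sketch (the helper name is ours, not the driver's):

static inline u32 example_viewport_size(u32 hdisplay, u32 vdisplay)
{
        u32 viewport_h = (vdisplay + 1) & ~1u;   /* 1080 -> 1080, 483 -> 484 */

        return (hdisplay << 16) | viewport_h;
}
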
index e148ab04b80b3adc88d0ac4a0329599f57ac2c00..7b4eeb7b4a8c5c3205e1f5c91cd3e51c53658c42 100644 (file)
 
 const u32 cayman_default_state[] =
 {
-       /* XXX fill in additional blit state */
+       0xc0066900,
+       0x00000000,
+       0x00000060, /* DB_RENDER_CONTROL */
+       0x00000000, /* DB_COUNT_CONTROL */
+       0x00000000, /* DB_DEPTH_VIEW */
+       0x0000002a, /* DB_RENDER_OVERRIDE */
+       0x00000000, /* DB_RENDER_OVERRIDE2 */
+       0x00000000, /* DB_HTILE_DATA_BASE */
 
        0xc0026900,
-       0x00000316,
-       0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-       0x00000010, /*  */
+       0x0000000a,
+       0x00000000, /* DB_STENCIL_CLEAR */
+       0x00000000, /* DB_DEPTH_CLEAR */
+
+       0xc0036900,
+       0x0000000f,
+       0x00000000, /* DB_DEPTH_INFO */
+       0x00000000, /* DB_Z_INFO */
+       0x00000000, /* DB_STENCIL_INFO */
+
+       0xc0016900,
+       0x00000080,
+       0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+       0xc00d6900,
+       0x00000083,
+       0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+       0x00000000, /* PA_SC_CLIPRECT_0_TL */
+       0x20002000, /* PA_SC_CLIPRECT_0_BR */
+       0x00000000,
+       0x20002000,
+       0x00000000,
+       0x20002000,
+       0x00000000,
+       0x20002000,
+       0xaaaaaaaa, /* PA_SC_EDGERULE */
+       0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+       0x0000000f, /* CB_TARGET_MASK */
+       0x0000000f, /* CB_SHADER_MASK */
+
+       0xc0226900,
+       0x00000094,
+       0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+       0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+       0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+       0xc0016900,
+       0x000000d4,
+       0x00000000, /* SX_MISC */
 
        0xc0026900,
        0x000000d9,
        0x00000000, /* CP_RINGID */
        0x00000000, /* CP_VMID */
+
+       0xc0096900,
+       0x00000100,
+       0x00ffffff, /* VGT_MAX_VTX_INDX */
+       0x00000000, /* VGT_MIN_VTX_INDX */
+       0x00000000, /* VGT_INDX_OFFSET */
+       0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+       0x00000000, /* SX_ALPHA_TEST_CONTROL */
+       0x00000000, /* CB_BLEND_RED */
+       0x00000000, /* CB_BLEND_GREEN */
+       0x00000000, /* CB_BLEND_BLUE */
+       0x00000000, /* CB_BLEND_ALPHA */
+
+       0xc0016900,
+       0x00000187,
+       0x00000100, /* SPI_VS_OUT_ID_0 */
+
+       0xc0026900,
+       0x00000191,
+       0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+       0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+       0xc0016900,
+       0x000001b1,
+       0x00000000, /* SPI_VS_OUT_CONFIG */
+
+       0xc0106900,
+       0x000001b3,
+       0x20000001, /* SPI_PS_IN_CONTROL_0 */
+       0x00000000, /* SPI_PS_IN_CONTROL_1 */
+       0x00000000, /* SPI_INTERP_CONTROL_0 */
+       0x00000000, /* SPI_INPUT_Z */
+       0x00000000, /* SPI_FOG_CNTL */
+       0x00100000, /* SPI_BARYC_CNTL */
+       0x00000000, /* SPI_PS_IN_CONTROL_2 */
+       0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+       0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+       0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+       0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+       0x00000000, /* SPI_GPR_MGMT */
+       0x00000000, /* SPI_LDS_MGMT */
+       0x00000000, /* SPI_STACK_MGMT */
+       0x00000000, /* SPI_WAVE_MGMT_1 */
+       0x00000000, /* SPI_WAVE_MGMT_2 */
+
+       0xc0016900,
+       0x000001e0,
+       0x00000000, /* CB_BLEND0_CONTROL */
+
+       0xc00e6900,
+       0x00000200,
+       0x00000000, /* DB_DEPTH_CONTROL */
+       0x00000000, /* DB_EQAA */
+       0x00cc0010, /* CB_COLOR_CONTROL */
+       0x00000210, /* DB_SHADER_CONTROL */
+       0x00010000, /* PA_CL_CLIP_CNTL */
+       0x00000004, /* PA_SU_SC_MODE_CNTL */
+       0x00000100, /* PA_CL_VTE_CNTL */
+       0x00000000, /* PA_CL_VS_OUT_CNTL */
+       0x00000000, /* PA_CL_NANINF_CNTL */
+       0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+       0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+       0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+       0x00000000, /*  */
+       0x00000000, /*  */
+
+       0xc0026900,
+       0x00000229,
+       0x00000000, /* SQ_PGM_START_FS */
+       0x00000000,
+
+       0xc0016900,
+       0x0000023b,
+       0x00000000, /* SQ_LDS_ALLOC_PS */
+
+       0xc0066900,
+       0x00000240,
+       0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+
+       0xc0046900,
+       0x00000247,
+       0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+
+       0xc0116900,
+       0x00000280,
+       0x00000000, /* PA_SU_POINT_SIZE */
+       0x00000000, /* PA_SU_POINT_MINMAX */
+       0x00000008, /* PA_SU_LINE_CNTL */
+       0x00000000, /* PA_SC_LINE_STIPPLE */
+       0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+       0x00000000, /* VGT_HOS_CNTL */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000, /* VGT_GS_MODE */
+
+       0xc0026900,
+       0x00000292,
+       0x00000000, /* PA_SC_MODE_CNTL_0 */
+       0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+       0xc0016900,
+       0x000002a1,
+       0x00000000, /* VGT_PRIMITIVEID_EN */
+
+       0xc0016900,
+       0x000002a5,
+       0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+       0xc0026900,
+       0x000002a8,
+       0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+       0x00000000,
+
+       0xc0026900,
+       0x000002ad,
+       0x00000000, /* VGT_REUSE_OFF */
+       0x00000000,
+
+       0xc0016900,
+       0x000002d5,
+       0x00000000, /* VGT_SHADER_STAGES_EN */
+
+       0xc0016900,
+       0x000002dc,
+       0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+       0xc0066900,
+       0x000002de,
+       0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+
+       0xc0026900,
+       0x000002e5,
+       0x00000000, /* VGT_STRMOUT_CONFIG */
+       0x00000000,
+
+       0xc01b6900,
+       0x000002f5,
+       0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+       0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+       0x00000000, /* PA_SC_LINE_CNTL */
+       0x00000000, /* PA_SC_AA_CONFIG */
+       0x00000005, /* PA_SU_VTX_CNTL */
+       0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+       0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+       0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+       0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+       0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+       0xffffffff,
+
+       0xc0026900,
+       0x00000316,
+       0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       0x00000010, /*  */
+};
+
+const u32 cayman_vs[] =
+{
+       0x00000004,
+       0x80400400,
+       0x0000a03c,
+       0x95000688,
+       0x00004000,
+       0x15000688,
+       0x00000000,
+       0x88000000,
+       0x04000000,
+       0x67961001,
+#ifdef __BIG_ENDIAN
+       0x00020000,
+#else
+       0x00000000,
+#endif
+       0x00000000,
+       0x04000000,
+       0x67961000,
+#ifdef __BIG_ENDIAN
+       0x00020008,
+#else
+       0x00000008,
+#endif
+       0x00000000,
+};
+
+const u32 cayman_ps[] =
+{
+       0x00000004,
+       0xa00c0000,
+       0x00000008,
+       0x80400000,
+       0x00000000,
+       0x95000688,
+       0x00000000,
+       0x88000000,
+       0x00380400,
+       0x00146b10,
+       0x00380000,
+       0x20146b10,
+       0x00380400,
+       0x40146b00,
+       0x80380000,
+       0x60146b00,
+       0x00000010,
+       0x000d1000,
+       0xb0800000,
+       0x00000000,
 };
 
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
 const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
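
For readers decoding the tables above: each group starts with what looks like a standard radeon type-3 packet header, then a register-block offset, then the register values named in the comments. Assuming the usual PACKET3 layout (packet type in bits 31:30, dword count in bits 29:16, opcode in bits 15:8), the leading words decode as follows; the macro below is a stand-in written for this note, not a quote of the driver's header.

#define EXAMPLE_PACKET3(op, n) \
        ((3u << 30) | (((n) & 0x3fffu) << 16) | (((op) & 0xffu) << 8))

/* 0xc0066900 == EXAMPLE_PACKET3(0x69, 6): SET_CONTEXT_REG with one offset
 * dword (0x00000000, i.e. DB_RENDER_CONTROL) followed by six values.
 * 0xc0026900 == EXAMPLE_PACKET3(0x69, 2): same opcode, two values.        */
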
index 33b75e5d0fa468e64d1a0c15292fa72a7bc0cee6..f5d0e9a602675d24b135a4491d583d44e53bcbec 100644 (file)
 #ifndef CAYMAN_BLIT_SHADERS_H
 #define CAYMAN_BLIT_SHADERS_H
 
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
 extern const u32 cayman_default_state[];
 
+extern const u32 cayman_ps_size, cayman_vs_size;
 extern const u32 cayman_default_size;
 
 #endif
index 7c37638095f7d1fa01624116d464e1d2a22ce8d9..7e3d96e7ac042814527db77a3bd305d5a3214de4 100644 (file)
@@ -88,21 +88,40 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 /* get temperature in millidegrees */
 int evergreen_get_temp(struct radeon_device *rdev)
 {
-       u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
-               ASIC_T_SHIFT;
-       u32 actual_temp = 0;
-
-       if (temp & 0x400)
-               actual_temp = -256;
-       else if (temp & 0x200)
-               actual_temp = 255;
-       else if (temp & 0x100) {
-               actual_temp = temp & 0x1ff;
-               actual_temp |= ~0x1ff;
-       } else
-               actual_temp = temp & 0xff;
+       u32 temp, toffset;
+       int actual_temp = 0;
+
+       if (rdev->family == CHIP_JUNIPER) {
+               toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+                       TOFFSET_SHIFT;
+               temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+                       TS0_ADC_DOUT_SHIFT;
+
+               if (toffset & 0x100)
+                       actual_temp = temp / 2 - (0x200 - toffset);
+               else
+                       actual_temp = temp / 2 + toffset;
+
+               actual_temp = actual_temp * 1000;
+
+       } else {
+               temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+                       ASIC_T_SHIFT;
 
-       return (actual_temp * 1000) / 2;
+               if (temp & 0x400)
+                       actual_temp = -256;
+               else if (temp & 0x200)
+                       actual_temp = 255;
+               else if (temp & 0x100) {
+                       actual_temp = temp & 0x1ff;
+                       actual_temp |= ~0x1ff;
+               } else
+                       actual_temp = temp & 0xff;
+
+               actual_temp = (actual_temp * 1000) / 2;
+       }
+
+       return actual_temp;
 }
 
 int sumo_get_temp(struct radeon_device *rdev)
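
Two pieces of arithmetic in the rewritten evergreen_get_temp() deserve a note: the legacy path sign-extends a small two's-complement temperature field, and the new Juniper path applies a calibration offset whose sign is flagged by bit 8. A toy version of the sign extension (the value in the comment is only an example):

static int example_sign_extend_9bit(u32 raw)
{
        int v = raw & 0x1ff;

        if (v & 0x100)              /* negative in 9-bit two's complement */
                v |= ~0x1ff;        /* e.g. 0x1f6 -> -10                  */
        return v;
}

The Juniper branch treats toffset the same way: bit 8 set means the offset is negative, hence temp / 2 - (0x200 - toffset); in both paths the result is then scaled to millidegrees.
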
@@ -1415,6 +1434,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
        case CHIP_CEDAR:
        case CHIP_REDWOOD:
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
        case CHIP_TURKS:
        case CHIP_CAICOS:
                force_no_swizzle = false;
@@ -1544,6 +1565,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
        case CHIP_REDWOOD:
        case CHIP_CEDAR:
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
        case CHIP_TURKS:
        case CHIP_CAICOS:
        default:
@@ -1685,6 +1708,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.max_hw_contexts = 4;
                rdev->config.evergreen.sq_num_cf_insts = 1;
 
+               rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       case CHIP_SUMO:
+               rdev->config.evergreen.num_ses = 1;
+               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_tile_pipes = 2;
+               if (rdev->pdev->device == 0x9648)
+                       rdev->config.evergreen.max_simds = 3;
+               else if ((rdev->pdev->device == 0x9647) ||
+                        (rdev->pdev->device == 0x964a))
+                       rdev->config.evergreen.max_simds = 4;
+               else
+                       rdev->config.evergreen.max_simds = 5;
+               rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 248;
+               rdev->config.evergreen.max_gs_threads = 32;
+               rdev->config.evergreen.max_stack_entries = 256;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 256;
+               rdev->config.evergreen.sx_max_export_pos_size = 64;
+               rdev->config.evergreen.sx_max_export_smx_size = 192;
+               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.sq_num_cf_insts = 2;
+
+               rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+               rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+               break;
+       case CHIP_SUMO2:
+               rdev->config.evergreen.num_ses = 1;
+               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_tile_pipes = 4;
+               rdev->config.evergreen.max_simds = 2;
+               rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+               rdev->config.evergreen.max_gprs = 256;
+               rdev->config.evergreen.max_threads = 248;
+               rdev->config.evergreen.max_gs_threads = 32;
+               rdev->config.evergreen.max_stack_entries = 512;
+               rdev->config.evergreen.sx_num_of_sets = 4;
+               rdev->config.evergreen.sx_max_export_size = 256;
+               rdev->config.evergreen.sx_max_export_pos_size = 64;
+               rdev->config.evergreen.sx_max_export_smx_size = 192;
+               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.sq_num_cf_insts = 2;
+
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
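
The SUMO entry is the only one here whose SIMD count depends on the exact PCI device id; condensed (ids taken from the hunk, helper name ours):

static unsigned int example_sumo_simd_count(u16 pci_device)
{
        switch (pci_device) {
        case 0x9648:
                return 3;
        case 0x9647:
        case 0x964a:
                return 4;
        default:
                return 5;
        }
}
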
@@ -2039,6 +2110,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_CEDAR:
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
        case CHIP_CAICOS:
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;
@@ -2060,6 +2133,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_CEDAR:
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
                ps_thread_count = 96;
                break;
        default:
@@ -2099,6 +2174,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_CEDAR:
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
        case CHIP_CAICOS:
                vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
                break;
@@ -2868,7 +2945,7 @@ restart_ih:
                        radeon_fence_process(rdev);
                        break;
                case 233: /* GUI IDLE */
-                       DRM_DEBUG("IH: CP EOP\n");
+                       DRM_DEBUG("IH: GUI idle\n");
                        rdev->pm.gui_idle = true;
                        wake_up(&rdev->irq.idle_queue);
                        break;
index ba06a69c6de857bd250b6c7258faa8a308361e43..57f3bc17b87e09a9dd0d1fd99fe66800e43ee9fb 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "evergreend.h"
 #include "evergreen_blit_shaders.h"
+#include "cayman_blit_shaders.h"
 
 #define DI_PT_RECTLIST        0x11
 #define DI_INDEX_SIZE_16_BIT  0x0
@@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 
        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM) ||
+           (rdev->family == CHIP_SUMO) ||
+           (rdev->family == CHIP_SUMO2) ||
            (rdev->family == CHIP_CAICOS))
                cp_set_surface_sync(rdev,
                                    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
@@ -199,6 +202,16 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
 {
+       /* workaround some hw bugs */
+       if (x2 == 0)
+               x1 = 1;
+       if (y2 == 0)
+               y1 = 1;
+       if (rdev->family == CHIP_CAYMAN) {
+               if ((x2 == 1) && (y2 == 1))
+                       x2 = 2;
+       }
+
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
@@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
        u64 gpu_addr;
        int dwords;
 
-       switch (rdev->family) {
-       case CHIP_CEDAR:
-       default:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 96;
-               num_vs_threads = 16;
-               num_gs_threads = 16;
-               num_es_threads = 16;
-               num_hs_threads = 16;
-               num_ls_threads = 16;
-               num_ps_stack_entries = 42;
-               num_vs_stack_entries = 42;
-               num_gs_stack_entries = 42;
-               num_es_stack_entries = 42;
-               num_hs_stack_entries = 42;
-               num_ls_stack_entries = 42;
-               break;
-       case CHIP_REDWOOD:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 20;
-               num_gs_threads = 20;
-               num_es_threads = 20;
-               num_hs_threads = 20;
-               num_ls_threads = 20;
-               num_ps_stack_entries = 42;
-               num_vs_stack_entries = 42;
-               num_gs_stack_entries = 42;
-               num_es_stack_entries = 42;
-               num_hs_stack_entries = 42;
-               num_ls_stack_entries = 42;
-               break;
-       case CHIP_JUNIPER:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 20;
-               num_gs_threads = 20;
-               num_es_threads = 20;
-               num_hs_threads = 20;
-               num_ls_threads = 20;
-               num_ps_stack_entries = 85;
-               num_vs_stack_entries = 85;
-               num_gs_stack_entries = 85;
-               num_es_stack_entries = 85;
-               num_hs_stack_entries = 85;
-               num_ls_stack_entries = 85;
-               break;
-       case CHIP_CYPRESS:
-       case CHIP_HEMLOCK:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 20;
-               num_gs_threads = 20;
-               num_es_threads = 20;
-               num_hs_threads = 20;
-               num_ls_threads = 20;
-               num_ps_stack_entries = 85;
-               num_vs_stack_entries = 85;
-               num_gs_stack_entries = 85;
-               num_es_stack_entries = 85;
-               num_hs_stack_entries = 85;
-               num_ls_stack_entries = 85;
-               break;
-       case CHIP_PALM:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 96;
-               num_vs_threads = 16;
-               num_gs_threads = 16;
-               num_es_threads = 16;
-               num_hs_threads = 16;
-               num_ls_threads = 16;
-               num_ps_stack_entries = 42;
-               num_vs_stack_entries = 42;
-               num_gs_stack_entries = 42;
-               num_es_stack_entries = 42;
-               num_hs_stack_entries = 42;
-               num_ls_stack_entries = 42;
-               break;
-       case CHIP_BARTS:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 20;
-               num_gs_threads = 20;
-               num_es_threads = 20;
-               num_hs_threads = 20;
-               num_ls_threads = 20;
-               num_ps_stack_entries = 85;
-               num_vs_stack_entries = 85;
-               num_gs_stack_entries = 85;
-               num_es_stack_entries = 85;
-               num_hs_stack_entries = 85;
-               num_ls_stack_entries = 85;
-               break;
-       case CHIP_TURKS:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 20;
-               num_gs_threads = 20;
-               num_es_threads = 20;
-               num_hs_threads = 20;
-               num_ls_threads = 20;
-               num_ps_stack_entries = 42;
-               num_vs_stack_entries = 42;
-               num_gs_stack_entries = 42;
-               num_es_stack_entries = 42;
-               num_hs_stack_entries = 42;
-               num_ls_stack_entries = 42;
-               break;
-       case CHIP_CAICOS:
-               num_ps_gprs = 93;
-               num_vs_gprs = 46;
-               num_temp_gprs = 4;
-               num_gs_gprs = 31;
-               num_es_gprs = 31;
-               num_hs_gprs = 23;
-               num_ls_gprs = 23;
-               num_ps_threads = 128;
-               num_vs_threads = 10;
-               num_gs_threads = 10;
-               num_es_threads = 10;
-               num_hs_threads = 10;
-               num_ls_threads = 10;
-               num_ps_stack_entries = 42;
-               num_vs_stack_entries = 42;
-               num_gs_stack_entries = 42;
-               num_es_stack_entries = 42;
-               num_hs_stack_entries = 42;
-               num_ls_stack_entries = 42;
-               break;
-       }
-
-       if ((rdev->family == CHIP_CEDAR) ||
-           (rdev->family == CHIP_PALM) ||
-           (rdev->family == CHIP_CAICOS))
-               sq_config = 0;
-       else
-               sq_config = VC_ENABLE;
-
-       sq_config |= (EXPORT_SRC_C |
-                     CS_PRIO(0) |
-                     LS_PRIO(0) |
-                     HS_PRIO(0) |
-                     PS_PRIO(0) |
-                     VS_PRIO(1) |
-                     GS_PRIO(2) |
-                     ES_PRIO(3));
-
-       sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
-                                 NUM_VS_GPRS(num_vs_gprs) |
-                                 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
-       sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
-                                 NUM_ES_GPRS(num_es_gprs));
-       sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
-                                 NUM_LS_GPRS(num_ls_gprs));
-       sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
-                                  NUM_VS_THREADS(num_vs_threads) |
-                                  NUM_GS_THREADS(num_gs_threads) |
-                                  NUM_ES_THREADS(num_es_threads));
-       sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
-                                    NUM_LS_THREADS(num_ls_threads));
-       sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
-                                   NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
-       sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
-                                   NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-       sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
-                                   NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
-
        /* set clear context state */
        radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
        radeon_ring_write(rdev, 0);
 
-       /* disable dyn gprs */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(rdev, 0);
+       if (rdev->family < CHIP_CAYMAN) {
+               switch (rdev->family) {
+               case CHIP_CEDAR:
+               default:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 96;
+                       num_vs_threads = 16;
+                       num_gs_threads = 16;
+                       num_es_threads = 16;
+                       num_hs_threads = 16;
+                       num_ls_threads = 16;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               case CHIP_REDWOOD:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 20;
+                       num_gs_threads = 20;
+                       num_es_threads = 20;
+                       num_hs_threads = 20;
+                       num_ls_threads = 20;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               case CHIP_JUNIPER:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 20;
+                       num_gs_threads = 20;
+                       num_es_threads = 20;
+                       num_hs_threads = 20;
+                       num_ls_threads = 20;
+                       num_ps_stack_entries = 85;
+                       num_vs_stack_entries = 85;
+                       num_gs_stack_entries = 85;
+                       num_es_stack_entries = 85;
+                       num_hs_stack_entries = 85;
+                       num_ls_stack_entries = 85;
+                       break;
+               case CHIP_CYPRESS:
+               case CHIP_HEMLOCK:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 20;
+                       num_gs_threads = 20;
+                       num_es_threads = 20;
+                       num_hs_threads = 20;
+                       num_ls_threads = 20;
+                       num_ps_stack_entries = 85;
+                       num_vs_stack_entries = 85;
+                       num_gs_stack_entries = 85;
+                       num_es_stack_entries = 85;
+                       num_hs_stack_entries = 85;
+                       num_ls_stack_entries = 85;
+                       break;
+               case CHIP_PALM:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 96;
+                       num_vs_threads = 16;
+                       num_gs_threads = 16;
+                       num_es_threads = 16;
+                       num_hs_threads = 16;
+                       num_ls_threads = 16;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               case CHIP_SUMO:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 96;
+                       num_vs_threads = 25;
+                       num_gs_threads = 25;
+                       num_es_threads = 25;
+                       num_hs_threads = 25;
+                       num_ls_threads = 25;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               case CHIP_SUMO2:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 96;
+                       num_vs_threads = 25;
+                       num_gs_threads = 25;
+                       num_es_threads = 25;
+                       num_hs_threads = 25;
+                       num_ls_threads = 25;
+                       num_ps_stack_entries = 85;
+                       num_vs_stack_entries = 85;
+                       num_gs_stack_entries = 85;
+                       num_es_stack_entries = 85;
+                       num_hs_stack_entries = 85;
+                       num_ls_stack_entries = 85;
+                       break;
+               case CHIP_BARTS:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 20;
+                       num_gs_threads = 20;
+                       num_es_threads = 20;
+                       num_hs_threads = 20;
+                       num_ls_threads = 20;
+                       num_ps_stack_entries = 85;
+                       num_vs_stack_entries = 85;
+                       num_gs_stack_entries = 85;
+                       num_es_stack_entries = 85;
+                       num_hs_stack_entries = 85;
+                       num_ls_stack_entries = 85;
+                       break;
+               case CHIP_TURKS:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 20;
+                       num_gs_threads = 20;
+                       num_es_threads = 20;
+                       num_hs_threads = 20;
+                       num_ls_threads = 20;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               case CHIP_CAICOS:
+                       num_ps_gprs = 93;
+                       num_vs_gprs = 46;
+                       num_temp_gprs = 4;
+                       num_gs_gprs = 31;
+                       num_es_gprs = 31;
+                       num_hs_gprs = 23;
+                       num_ls_gprs = 23;
+                       num_ps_threads = 128;
+                       num_vs_threads = 10;
+                       num_gs_threads = 10;
+                       num_es_threads = 10;
+                       num_hs_threads = 10;
+                       num_ls_threads = 10;
+                       num_ps_stack_entries = 42;
+                       num_vs_stack_entries = 42;
+                       num_gs_stack_entries = 42;
+                       num_es_stack_entries = 42;
+                       num_hs_stack_entries = 42;
+                       num_ls_stack_entries = 42;
+                       break;
+               }
 
-       /* SQ config */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-       radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-       radeon_ring_write(rdev, sq_config);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
-       radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, 0);
-       radeon_ring_write(rdev, sq_thread_resource_mgmt);
-       radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
-       radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+               if ((rdev->family == CHIP_CEDAR) ||
+                   (rdev->family == CHIP_PALM) ||
+                   (rdev->family == CHIP_SUMO) ||
+                   (rdev->family == CHIP_SUMO2) ||
+                   (rdev->family == CHIP_CAICOS))
+                       sq_config = 0;
+               else
+                       sq_config = VC_ENABLE;
+
+               sq_config |= (EXPORT_SRC_C |
+                             CS_PRIO(0) |
+                             LS_PRIO(0) |
+                             HS_PRIO(0) |
+                             PS_PRIO(0) |
+                             VS_PRIO(1) |
+                             GS_PRIO(2) |
+                             ES_PRIO(3));
+
+               sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+                                         NUM_VS_GPRS(num_vs_gprs) |
+                                         NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+               sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+                                         NUM_ES_GPRS(num_es_gprs));
+               sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+                                         NUM_LS_GPRS(num_ls_gprs));
+               sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+                                          NUM_VS_THREADS(num_vs_threads) |
+                                          NUM_GS_THREADS(num_gs_threads) |
+                                          NUM_ES_THREADS(num_es_threads));
+               sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+                                            NUM_LS_THREADS(num_ls_threads));
+               sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+                                           NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+               sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+                                           NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+               sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+                                           NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+               /* disable dyn gprs */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(rdev, 0);
+
+               /* SQ config */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+               radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(rdev, sq_config);
+               radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+               radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+               radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+               radeon_ring_write(rdev, 0);
+               radeon_ring_write(rdev, 0);
+               radeon_ring_write(rdev, sq_thread_resource_mgmt);
+               radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+               radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+               radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+               radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+       }
 
        /* CONTEXT_CONTROL */
        radeon_ring_write(rdev, 0xc0012800);
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;
 
-       rdev->r600_blit.state_len = evergreen_default_size;
+       if (rdev->family < CHIP_CAYMAN)
+               rdev->r600_blit.state_len = evergreen_default_size;
+       else
+               rdev->r600_blit.state_len = cayman_default_size;
 
        dwords = rdev->r600_blit.state_len;
        while (dwords & 0xf) {
@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
        obj_size = ALIGN(obj_size, 256);
 
        rdev->r600_blit.vs_offset = obj_size;
-       obj_size += evergreen_vs_size * 4;
+       if (rdev->family < CHIP_CAYMAN)
+               obj_size += evergreen_vs_size * 4;
+       else
+               obj_size += cayman_vs_size * 4;
        obj_size = ALIGN(obj_size, 256);
 
        rdev->r600_blit.ps_offset = obj_size;
-       obj_size += evergreen_ps_size * 4;
+       if (rdev->family < CHIP_CAYMAN)
+               obj_size += evergreen_ps_size * 4;
+       else
+               obj_size += cayman_ps_size * 4;
        obj_size = ALIGN(obj_size, 256);
 
        r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
                return r;
        }
 
-       memcpy_toio(ptr + rdev->r600_blit.state_offset,
-                   evergreen_default_state, rdev->r600_blit.state_len * 4);
-
-       if (num_packet2s)
-               memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
-                           packet2s, num_packet2s * 4);
-       for (i = 0; i < evergreen_vs_size; i++)
-               *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
-       for (i = 0; i < evergreen_ps_size; i++)
-               *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+       if (rdev->family < CHIP_CAYMAN) {
+               memcpy_toio(ptr + rdev->r600_blit.state_offset,
+                           evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+               if (num_packet2s)
+                       memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+                                   packet2s, num_packet2s * 4);
+               for (i = 0; i < evergreen_vs_size; i++)
+                       *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+               for (i = 0; i < evergreen_ps_size; i++)
+                       *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+       } else {
+               memcpy_toio(ptr + rdev->r600_blit.state_offset,
+                           cayman_default_state, rdev->r600_blit.state_len * 4);
+
+               if (num_packet2s)
+                       memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+                                   packet2s, num_packet2s * 4);
+               for (i = 0; i < cayman_vs_size; i++)
+                       *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
+               for (i = 0; i < cayman_ps_size; i++)
+                       *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
+       }
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
index f37e91ee8a1114aed16bab49143fd5d1fe206914..1636e34498252c3550e17a6515aa6097974bbf7f 100644 (file)
 #define                SE_DB_BUSY                                      (1 << 30)
 #define                SE_CB_BUSY                                      (1 << 31)
 /* evergreen */
+#define        CG_THERMAL_CTRL                                 0x72c
+#define                TOFFSET_MASK                            0x00003FE0
+#define                TOFFSET_SHIFT                           5
 #define        CG_MULT_THERMAL_STATUS                          0x740
 #define                ASIC_T(x)                               ((x) << 16)
-#define                ASIC_T_MASK                             0x7FF0000
+#define                ASIC_T_MASK                             0x07FF0000
 #define                ASIC_T_SHIFT                            16
+#define        CG_TS0_STATUS                                   0x760
+#define                TS0_ADC_DOUT_MASK                       0x000003FF
+#define                TS0_ADC_DOUT_SHIFT                      0
 /* APU */
 #define        CG_THERMAL_STATUS                               0x678
 
index b205ba1cdd8f9b562a2033c9fec000a7b0b83e7e..16caafeadf5e5603208a3055b2344355106d5a05 100644 (file)
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
                return r;
        cayman_gpu_init(rdev);
 
-#if 0
-       r = cayman_blit_init(rdev);
+       r = evergreen_blit_init(rdev);
        if (r) {
-               cayman_blit_fini(rdev);
+               evergreen_blit_fini(rdev);
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
-#endif
 
        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)
 
 int cayman_suspend(struct radeon_device *rdev)
 {
-       /* int r; */
+       int r;
 
        /* FIXME: we should wait for ring to be empty */
        cayman_cp_enable(rdev, false);
@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
 
-#if 0
        /* unpin shaders bo */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (likely(r == 0)) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }
-#endif
+
        return 0;
 }
 
@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)
 
 void cayman_fini(struct radeon_device *rdev)
 {
-       /* cayman_blit_fini(rdev); */
+       evergreen_blit_fini(rdev);
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
index 2fef9de7f363b2fb47478e8720ea1a7ca7fb7331..686f9dc5d4bd94f8f8dec4a23de185aee832542a 100644 (file)
@@ -63,7 +63,7 @@ struct r100_cs_track {
        unsigned                        num_arrays;
        unsigned                        max_indx;
        unsigned                        color_channel_mask;
-       struct r100_cs_track_array      arrays[11];
+       struct r100_cs_track_array      arrays[16];
        struct r100_cs_track_cb         cb[R300_MAX_CB];
        struct r100_cs_track_cb         zb;
        struct r100_cs_track_cb         aa;
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
        ib = p->ib->ptr;
        track = (struct r100_cs_track *)p->track;
        c = radeon_get_ib_value(p, idx++) & 0x1F;
+       if (c > 16) {
+           DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+                     pkt->opcode);
+           r100_cs_dump_packet(p, pkt);
+           return -EINVAL;
+       }
        track->num_arrays = c;
        for (i = 0; i < (c - 1); i+=2, idx+=3) {
                r = r100_cs_packet_next_reloc(p, &reloc);
index 6f27593901c7110752b9b2f39267f04546f544eb..7dd45ca64e29464b717a837f04a1be049e5a9292 100644 (file)
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
 MODULE_FIRMWARE("radeon/PALM_me.bin");
 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -2024,6 +2028,14 @@ int r600_init_microcode(struct radeon_device *rdev)
                chip_name = "PALM";
                rlc_chip_name = "SUMO";
                break;
+       case CHIP_SUMO:
+               chip_name = "SUMO";
+               rlc_chip_name = "SUMO";
+               break;
+       case CHIP_SUMO2:
+               chip_name = "SUMO2";
+               rlc_chip_name = "SUMO";
+               break;
        default: BUG();
        }
 
@@ -3432,7 +3444,7 @@ restart_ih:
                        radeon_fence_process(rdev);
                        break;
                case 233: /* GUI IDLE */
-                       DRM_DEBUG("IH: CP EOP\n");
+                       DRM_DEBUG("IH: GUI idle\n");
                        rdev->pm.gui_idle = true;
                        wake_up(&rdev->irq.idle_queue);
                        break;
index fd18be9871ab5b897cc93c4a309a9081653f9489..909bda8dd550c54a9965e787d6876ff3fc325d33 100644 (file)
@@ -71,20 +71,21 @@ struct r600_cs_track {
        u64                     db_bo_mc;
 };
 
-#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
-#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
-#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
-#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
-#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
-#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
-#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
-#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 3,  0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 6,  0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
 
 struct gpu_formats {
        unsigned blockwidth;
        unsigned blockheight;
        unsigned blocksize;
        unsigned valid_color;
+       enum radeon_family min_family;
 };
 
 static const struct gpu_formats color_formats_table[] = {
@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
        [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
        [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
        [V_038004_FMT_BC5] = { 4, 4, 16, 0},
+       [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+       [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
 
+       /* The other Evergreen formats */
+       [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
 };
 
 static inline bool fmt_is_valid_color(u32 format)
@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
        return false;
 }
 
-static inline bool fmt_is_valid_texture(u32 format)
+static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
 {
        if (format >= ARRAY_SIZE(color_formats_table))
                return false;
        
+       if (family < color_formats_table[format].min_family)
+               return false;
+
        if (color_formats_table[format].blockwidth > 0)
                return true;
 
@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
                return -EINVAL;
        }
        format = G_038004_DATA_FORMAT(word1);
-       if (!fmt_is_valid_texture(format)) {
+       if (!fmt_is_valid_texture(format, p->family)) {
                dev_warn(p->dev, "%s:%d texture invalid format %d\n",
                         __func__, __LINE__, format);
                return -EINVAL;
index b2b944bcd05ae53d1ff19aa16edeee941aba3187..f140a0d5cb543c0281c8e8a85f62df873fa51809 100644 (file)
 #define     V_038004_FMT_BC3                           0x00000033
 #define     V_038004_FMT_BC4                           0x00000034
 #define     V_038004_FMT_BC5                           0x00000035
+#define     V_038004_FMT_BC6                           0x00000036
+#define     V_038004_FMT_BC7                           0x00000037
+#define     V_038004_FMT_32_AS_32_32_32_32             0x00000038
 #define R_038010_SQ_TEX_RESOURCE_WORD4_0             0x038010
 #define   S_038010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
 #define   G_038010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
index ba643b5760542e0e6ad3298464be1e7505995739..27f45579e64ba4d895037db76241814bd24f5d14 100644 (file)
@@ -165,6 +165,7 @@ struct radeon_clock {
        uint32_t default_sclk;
        uint32_t default_dispclk;
        uint32_t dp_extclk;
+       uint32_t max_pixel_clock;
 };
 
 /*
index d948265db87e7140bae6bc1c0fc15005ba2940bd..b2449629537d3b5922e86e325ac85bb214cacea2 100644 (file)
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
        .get_vblank_counter = &evergreen_get_vblank_counter,
        .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
-       .copy_blit = NULL,
-       .copy_dma = NULL,
-       .copy = NULL,
+       .copy_blit = &evergreen_copy_blit,
+       .copy_dma = &evergreen_copy_blit,
+       .copy = &evergreen_copy_blit,
        .get_engine_clock = &radeon_atom_get_engine_clock,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .get_memory_clock = &radeon_atom_get_memory_clock,
@@ -938,6 +938,13 @@ static struct radeon_asic cayman_asic = {
 int radeon_asic_init(struct radeon_device *rdev)
 {
        radeon_register_accessor_init(rdev);
+
+       /* set the number of crtcs */
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               rdev->num_crtc = 1;
+       else
+               rdev->num_crtc = 2;
+
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
@@ -1017,18 +1024,32 @@ int radeon_asic_init(struct radeon_device *rdev)
        case CHIP_JUNIPER:
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
+               /* set num crtcs */
+               if (rdev->family == CHIP_CEDAR)
+                       rdev->num_crtc = 4;
+               else
+                       rdev->num_crtc = 6;
                rdev->asic = &evergreen_asic;
                break;
        case CHIP_PALM:
+       case CHIP_SUMO:
+       case CHIP_SUMO2:
                rdev->asic = &sumo_asic;
                break;
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
+               /* set num crtcs */
+               if (rdev->family == CHIP_CAICOS)
+                       rdev->num_crtc = 4;
+               else
+                       rdev->num_crtc = 6;
                rdev->asic = &btc_asic;
                break;
        case CHIP_CAYMAN:
                rdev->asic = &cayman_asic;
+               /* set num crtcs */
+               rdev->num_crtc = 6;
                break;
        default:
                /* FIXME: not supported yet */
@@ -1040,18 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev)
                rdev->asic->set_memory_clock = NULL;
        }
 
-       /* set the number of crtcs */
-       if (rdev->flags & RADEON_SINGLE_CRTC)
-               rdev->num_crtc = 1;
-       else {
-               if (ASIC_IS_DCE41(rdev))
-                       rdev->num_crtc = 2;
-               else if (ASIC_IS_DCE4(rdev))
-                       rdev->num_crtc = 6;
-               else
-                       rdev->num_crtc = 2;
-       }
-
        return 0;
 }
 
index 90dfb2b8cf0318529b28b84b0bfd831764e5d59e..fa62a503ae70e9daf5b371aa12b2ff1dbdc0a508 100644 (file)
@@ -1246,6 +1246,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                }
                *dcpll = *p1pll;
 
+               rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+               if (rdev->clock.max_pixel_clock == 0)
+                       rdev->clock.max_pixel_clock = 40000;
+
                return true;
        }
 
index 5249af8931e60549e01362102f9c8ca941ba0d24..2d48e7a1474b4d915652cc8c6718dc4c8f4a8db4 100644 (file)
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
        p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
        if (p1pll->reference_div < 2)
                p1pll->reference_div = 12;
-       p2pll->reference_div = p1pll->reference_div;    
+       p2pll->reference_div = p1pll->reference_div;
 
        /* These aren't in the device-tree */
        if (rdev->family >= CHIP_R420) {
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
                p2pll->pll_out_min = 12500;
                p2pll->pll_out_max = 35000;
        }
+       /* not sure what the max should be in all cases */
+       rdev->clock.max_pixel_clock = 35000;
 
        spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
        spll->reference_div = mpll->reference_div =
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
        else
                rdev->clock.default_sclk =
                        radeon_legacy_get_engine_clock(rdev);
-                       
+
        val = of_get_property(dp, "ATY,MCLK", NULL);
        if (val && *val)
                rdev->clock.default_mclk = (*val) / 10;
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
                        radeon_legacy_get_memory_clock(rdev);
 
        DRM_INFO("Using device-tree clock info\n");
-       
+
        return true;
 }
 #else
index 5b991f7c6e2add24f1b4290b270cf1d44282915f..e4594676a07c2c4bdb0c1f3575e91e8e98c8265d 100644 (file)
@@ -866,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
                rdev->clock.default_sclk = sclk;
                rdev->clock.default_mclk = mclk;
 
+               if (RBIOS32(pll_info + 0x16))
+                       rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+               else
+                       rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
+
                return true;
        }
        return false;
@@ -1548,10 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                           (rdev->pdev->subsystem_device == 0x4a48)) {
                        /* Mac X800 */
                        rdev->mode_info.connector_table = CT_MAC_X800;
-               } else if ((rdev->pdev->device == 0x4150) &&
+               } else if ((of_machine_is_compatible("PowerMac7,2") ||
+                           of_machine_is_compatible("PowerMac7,3")) &&
+                          (rdev->pdev->device == 0x4150) &&
                           (rdev->pdev->subsystem_vendor == 0x1002) &&
                           (rdev->pdev->subsystem_device == 0x4150)) {
-                       /* Mac G5 9600 */
+                       /* Mac G5 tower 9600 */
                        rdev->mode_info.connector_table = CT_MAC_G5_9600;
                } else
 #endif /* CONFIG_PPC_PMAC */
index ee1dccb3fec9792e721c17aae9b0eb81d832c396..cbfca3a24fdf9caac828d4cca8ce8445963af4c4 100644 (file)
@@ -44,6 +44,8 @@ extern void
 radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
                             struct drm_connector *drm_connector);
 
+bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+
 void radeon_connector_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
@@ -626,8 +628,14 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
 static int radeon_vga_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
 {
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
        /* XXX check mode bandwidth */
-       /* XXX verify against max DAC output frequency */
+
+       if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
 
@@ -830,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
                if (!radeon_connector->edid) {
                        DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
                                        drm_get_connector_name(connector));
+                       /* rs690 seems to have a problem with connectors not existing and always
+                        * return a block of 0's. If we see this just stop polling on this output */
+                       if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+                               ret = connector_status_disconnected;
+                               DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+                               radeon_connector->ddc_bus = NULL;
+                       }
                } else {
                        radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
@@ -1015,6 +1030,11 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
                } else
                        return MODE_CLOCK_HIGH;
        }
+
+       /* check against the max pixel clock */
+       if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
 
@@ -1052,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
 {
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
        int ret;
 
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
-               struct drm_encoder *encoder;
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
                struct drm_display_mode *mode;
 
                if (!radeon_dig_connector->edp_on)
@@ -1067,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
                                                     ATOM_TRANSMITTER_ACTION_POWER_OFF);
 
                if (ret > 0) {
-                       encoder = radeon_best_single_encoder(connector);
                        if (encoder) {
                                radeon_fixup_lvds_native_mode(encoder, connector);
                                /* add scaled modes */
@@ -1091,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
                        /* add scaled modes */
                        radeon_add_common_modes(encoder, connector);
                }
-       } else
+       } else {
+               /* need to setup ddc on the bridge */
+               if (radeon_connector_encoder_is_dp_bridge(connector)) {
+                       if (encoder)
+                               radeon_atom_ext_encoder_setup_ddc(encoder);
+               }
                ret = radeon_ddc_get_modes(radeon_connector);
+       }
 
        return ret;
 }
@@ -1176,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enum drm_connector_status ret = connector_status_disconnected;
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 
        if (radeon_connector->edid) {
                kfree(radeon_connector->edid);
                radeon_connector->edid = NULL;
        }
 
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
-               struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
                if (encoder) {
                        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
                        struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1203,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                        atombios_set_edp_panel_power(connector,
                                                     ATOM_TRANSMITTER_ACTION_POWER_OFF);
        } else {
+               /* need to setup ddc on the bridge */
+               if (radeon_connector_encoder_is_dp_bridge(connector)) {
+                       if (encoder)
+                               radeon_atom_ext_encoder_setup_ddc(encoder);
+               }
                radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
                if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
                        ret = connector_status_connected;
@@ -1217,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
                                        ret = connector_status_connected;
                        }
                }
+
+               if ((ret == connector_status_disconnected) &&
+                   radeon_connector->dac_load_detect) {
+                       struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+                       struct drm_encoder_helper_funcs *encoder_funcs;
+                       if (encoder) {
+                               encoder_funcs = encoder->helper_private;
+                               ret = encoder_funcs->detect(encoder, connector);
+                       }
+               }
        }
 
        radeon_connector_update_scratch_regs(connector, ret);
@@ -1231,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
 
        /* XXX check mode bandwidth */
 
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
                struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 
                if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1241,7 +1284,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
                        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
                        struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
 
-               /* AVIVO hardware supports downscaling modes larger than the panel
+                       /* AVIVO hardware supports downscaling modes larger than the panel
                         * to the panel size, but I'm not sure this is desirable.
                         */
                        if ((mode->hdisplay > native_mode->hdisplay) ||
@@ -1390,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
                default:
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
+                       radeon_connector->dac_load_detect = true;
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.load_detect_property,
+                                                     1);
                        break;
                case DRM_MODE_CONNECTOR_DVII:
                case DRM_MODE_CONNECTOR_DVID:
@@ -1411,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                connector->doublescan_allowed = true;
                        else
                                connector->doublescan_allowed = false;
+                       if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+                               radeon_connector->dac_load_detect = true;
+                               drm_connector_attach_property(&radeon_connector->base,
+                                                             rdev->mode_info.load_detect_property,
+                                                             1);
+                       }
                        break;
                case DRM_MODE_CONNECTOR_LVDS:
                case DRM_MODE_CONNECTOR_eDP:
index 8c1916941871d4bd10b0c3fda53eecdaad3e96cd..fae00c0d75aaf1fae7fcbfc9b504370f560f5d42 100644 (file)
@@ -228,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
+       parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
index 5b61364e31f4b2be689c481969d62302f1f2e5c0..7cfaa7e2f3b57195671beb6958c8447f9f108683 100644 (file)
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
        "CYPRESS",
        "HEMLOCK",
        "PALM",
+       "SUMO",
+       "SUMO2",
        "BARTS",
        "TURKS",
        "CAICOS",
@@ -213,6 +215,8 @@ int radeon_wb_init(struct radeon_device *rdev)
                return r;
        }
 
+       /* clear wb memory */
+       memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
        /* disable event_write fences */
        rdev->wb.use_event = false;
        /* disabled via module param */
@@ -752,6 +756,7 @@ int radeon_device_init(struct radeon_device *rdev,
        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
+               rdev->need_dma32 = true;
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }
 
index ae247eec87c0be7181b4649ee4fc79d890c9b19a..292f73f0ddbd51210b8bf3494acdd79dc3d54719 100644 (file)
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
                radeon_bo_unreserve(work->old_rbo);
        } else
                DRM_ERROR("failed to reserve buffer after flip\n");
+
+       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
        kfree(work);
 }
 
@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        new_radeon_fb = to_radeon_framebuffer(fb);
        /* schedule unpin of the old buffer */
        obj = old_radeon_fb->obj;
+       /* take a reference to the old object */
+       drm_gem_object_reference(obj);
        rbo = gem_to_radeon_bo(obj);
        work->old_rbo = rbo;
        INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (radeon_crtc->unpin_work) {
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               kfree(work);
-               radeon_fence_unref(&fence);
-
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-               return -EBUSY;
+               r = -EBUSY;
+               goto unlock_free;
        }
        radeon_crtc->unpin_work = work;
        radeon_crtc->deferred_flip_completion = 0;
@@ -497,6 +498,8 @@ pflip_cleanup1:
 pflip_cleanup:
        spin_lock_irqsave(&dev->event_lock, flags);
        radeon_crtc->unpin_work = NULL;
+unlock_free:
+       drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
        spin_unlock_irqrestore(&dev->event_lock, flags);
        radeon_fence_unref(&fence);
        kfree(work);
index 1d330606292feb8d2bec2fafc1ea3593bb82a980..73dfbe8e5f9ed7d750a424c5fa9aa7a8ed6ab025 100644 (file)
@@ -113,7 +113,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = 0;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = 0;
@@ -151,7 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
index 1b557554696e09b871486236b8449e7e2f131311..f55b64cb59d1d0170b25e35605354bca56bfd3ab 100644 (file)
@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
        }
 
        if (ASIC_IS_DCE3(rdev) &&
-           (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
+           ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+            radeon_encoder_is_dp_bridge(encoder))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                radeon_dp_set_link_config(connector, mode);
        }
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        if (radeon_encoder_is_dp_bridge(encoder))
                return ATOM_ENCODER_MODE_DP;
 
+       /* DVO is always DVO */
+       if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+               return ATOM_ENCODER_MODE_DVO;
+
        connector = radeon_get_connector_for_encoder(encoder);
-       if (!connector) {
-               switch (radeon_encoder->encoder_id) {
-               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-                       return ATOM_ENCODER_MODE_DVI;
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-               default:
-                       return ATOM_ENCODER_MODE_CRT;
-               }
-       }
+       /* if we don't have an active device yet, just use one of
+        * the connectors tied to the encoder.
+        */
+       if (!connector)
+               connector = radeon_get_connector_for_encoder_init(encoder);
        radeon_connector = to_radeon_connector(connector);
 
        switch (connector->connector_type) {
@@ -954,10 +950,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        int dp_lane_count = 0;
        int connector_object_id = 0;
        int igp_lane_info = 0;
+       int dig_encoder = dig->dig_encoder;
 
-       if (action == ATOM_TRANSMITTER_ACTION_INIT)
+       if (action == ATOM_TRANSMITTER_ACTION_INIT) {
                connector = radeon_get_connector_for_encoder_init(encoder);
-       else
+               /* just needed to avoid bailing in the encoder check.  the encoder
+                * isn't used for init
+                */
+               dig_encoder = 0;
+       } else
                connector = radeon_get_connector_for_encoder(encoder);
 
        if (connector) {
@@ -973,7 +974,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        }
 
        /* no dig encoder assigned */
-       if (dig->dig_encoder == -1)
+       if (dig_encoder == -1)
                return;
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
@@ -1023,7 +1024,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
                if (dig->linkb)
                        args.v3.acConfig.ucLinkSel = 1;
-               if (dig->dig_encoder & 1)
+               if (dig_encoder & 1)
                        args.v3.acConfig.ucEncoderSel = 1;
 
                /* Select the PLL for the PHY
@@ -1073,7 +1074,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                                args.v3.acConfig.fDualLinkConnector = 1;
                }
        } else if (ASIC_IS_DCE32(rdev)) {
-               args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
+               args.v2.acConfig.ucEncoderSel = dig_encoder;
                if (dig->linkb)
                        args.v2.acConfig.ucLinkSel = 1;
 
@@ -1100,7 +1101,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        } else {
                args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
-               if (dig->dig_encoder)
+               if (dig_encoder)
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
                else
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
@@ -1521,26 +1522,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        }
 
        if (ext_encoder) {
-               int action;
-
                switch (mode) {
                case DRM_MODE_DPMS_ON:
                default:
-                       if (ASIC_IS_DCE41(rdev))
-                               action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
-                       else
-                               action = ATOM_ENABLE;
+                       if (ASIC_IS_DCE41(rdev)) {
+                               atombios_external_encoder_setup(encoder, ext_encoder,
+                                                               EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+                               atombios_external_encoder_setup(encoder, ext_encoder,
+                                                               EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+                       } else
+                               atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
                        break;
                case DRM_MODE_DPMS_STANDBY:
                case DRM_MODE_DPMS_SUSPEND:
                case DRM_MODE_DPMS_OFF:
-                       if (ASIC_IS_DCE41(rdev))
-                               action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
-                       else
-                               action = ATOM_DISABLE;
+                       if (ASIC_IS_DCE41(rdev)) {
+                               atombios_external_encoder_setup(encoder, ext_encoder,
+                                                               EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+                               atombios_external_encoder_setup(encoder, ext_encoder,
+                                                               EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+                       } else
+                               atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
                        break;
                }
-               atombios_external_encoder_setup(encoder, ext_encoder, action);
        }
 
        radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -1999,6 +2003,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
        return connector_status_disconnected;
 }
 
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+       u32 bios_0_scratch;
+
+       if (!ASIC_IS_DCE4(rdev))
+               return connector_status_unknown;
+
+       if (!ext_encoder)
+               return connector_status_unknown;
+
+       if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+               return connector_status_unknown;
+
+       /* load detect on the dp bridge */
+       atombios_external_encoder_setup(encoder, ext_encoder,
+                                       EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+       bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+       DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+       if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+               if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+                       return connector_status_connected;
+       }
+       if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+               if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+                       return connector_status_connected;
+       }
+       if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+               if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+                       return connector_status_connected;
+       }
+       if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+               if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+                       return connector_status_connected; /* CTV */
+               else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+                       return connector_status_connected; /* STV */
+       }
+       return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+       struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
+
+       if (ext_encoder)
+               /* ddc_setup on the dp bridge */
+               atombios_external_encoder_setup(encoder, ext_encoder,
+                                               EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2162,7 +2225,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
        .mode_set = radeon_atom_encoder_mode_set,
        .commit = radeon_atom_encoder_commit,
        .disable = radeon_atom_encoder_disable,
-       /* no detect for TMDS/LVDS yet */
+       .detect = radeon_atom_dig_detect,
 };
 
 static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
index 6f1d9e563e7701f4f6817769e9ea02955ac70f96..ec2f1ea84f81e6c292543aa1e1a56433a19a253f 100644 (file)
@@ -81,6 +81,8 @@ enum radeon_family {
        CHIP_CYPRESS,
        CHIP_HEMLOCK,
        CHIP_PALM,
+       CHIP_SUMO,
+       CHIP_SUMO2,
        CHIP_BARTS,
        CHIP_TURKS,
        CHIP_CAICOS,
index 1f822943657017b3684d6204ae19c76abd95ae6a..021d2b6b556f9e837d2c8a508891c4b6bc19850e 100644 (file)
 #include "radeon.h"
 #include "radeon_trace.h"
 
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+{
+       if (rdev->wb.enabled) {
+               u32 scratch_index;
+               if (rdev->wb.use_event)
+                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               else
+                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+       } else
+               WREG32(rdev->fence_drv.scratch_reg, seq);
+}
+
+static u32 radeon_fence_read(struct radeon_device *rdev)
+{
+       u32 seq;
+
+       if (rdev->wb.enabled) {
+               u32 scratch_index;
+               if (rdev->wb.use_event)
+                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               else
+                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+       } else
+               seq = RREG32(rdev->fence_drv.scratch_reg);
+       return seq;
+}
+
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
        unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
                return 0;
        }
        fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-       if (!rdev->cp.ready) {
+       if (!rdev->cp.ready)
                /* FIXME: cp is not running assume everythings is done right
                 * away
                 */
-               WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-       } else
+               radeon_fence_write(rdev, fence->seq);
+       else
                radeon_fence_ring_emit(rdev, fence);
 
        trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
        bool wake = false;
        unsigned long cjiffies;
 
-       if (rdev->wb.enabled) {
-               u32 scratch_index;
-               if (rdev->wb.use_event)
-                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               else
-                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-               seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-       } else
-               seq = RREG32(rdev->fence_drv.scratch_reg);
+       seq = radeon_fence_read(rdev);
        if (seq != rdev->fence_drv.last_seq) {
                rdev->fence_drv.last_seq = seq;
                rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
                        r = radeon_gpu_reset(rdev);
                        if (r)
                                return r;
-                       WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+                       radeon_fence_write(rdev, fence->seq);
                        rdev->gpu_lockup = false;
                }
                timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
                write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
                return r;
        }
-       WREG32(rdev->fence_drv.scratch_reg, 0);
+       radeon_fence_write(rdev, 0);
        atomic_set(&rdev->fence_drv.seq, 0);
        INIT_LIST_HEAD(&rdev->fence_drv.created);
        INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
        struct radeon_fence *fence;
 
        seq_printf(m, "Last signaled fence 0x%08X\n",
-                  RREG32(rdev->fence_drv.scratch_reg));
+                  radeon_fence_read(rdev));
        if (!list_empty(&rdev->fence_drv.emited)) {
                   fence = list_entry(rdev->fence_drv.emited.prev,
                                      struct radeon_fence, list);
index 977a341266b6d2d5c52fa5073c7f1a21a8dcdd37..6df4e3cec0c23cdced34b0da862eddd6d08c2c88 100644 (file)
@@ -483,6 +483,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
 extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
                                           int action, uint8_t lane_num,
                                           uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
 extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                                u8 write_byte, u8 *read_byte);
 
index 86eda1ea94dfc7a250d7add83ad2b25b8ae0dd76..aaa19dc418a0f0bf194d789592de943d3e5c2dca 100644 (file)
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
+       case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SUMO:
                rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
                if (IS_ERR(rdev->pm.int_hwmon_dev)) {
index 92f1900dc7caf10f14dba34edec2e34b721ff09d..ea49752ee99c472a15025efb2335c9a03394ce61 100644 (file)
@@ -758,6 +758,5 @@ r600 0x9400
 0x00009714 VC_ENHANCE
 0x00009830 DB_DEBUG
 0x00009838 DB_WATERMARKS
-0x00028D28 DB_SRESULTS_COMPARE_STATE0
 0x00028D44 DB_ALPHA_TO_MASK
 0x00009700 VC_CNTL
index bf5f83ea14fe19819874311b66618957859286f2..cb1ee4e0050ade2a0da63d4b8cdac714f10c1aff 100644 (file)
@@ -647,9 +647,6 @@ int savage_driver_firstopen(struct drm_device *dev)
        ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
                         _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
                         &dev_priv->aperture);
-       if (ret)
-               return ret;
-
        return ret;
 }
 
index 67d2a7585934c31b3607710c675e531acb21e6a6..36ca465c00cefac036a22fad61fc70b2568a1ccc 100644 (file)
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
          - 3M PCT touch screens
          - ActionStar dual touch panels
          - Cando dual touch panels
+         - Chunghwa panels
          - CVTouch panels
          - Cypress TrueTouch panels
          - Elo TouchSystems IntelliTouch Plus panels
index c957c4b4fe703368a4d19e739277b5c9bd41383b..f7440e8ce3e77e299796e131bfcf4684ee36de44 100644 (file)
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
index 0b374a6d6db007853f6762ef489e9191a5e1258e..aecb5a4b8d6d91e80fe4eb6398bf5992ea6a7f9d 100644 (file)
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 
+#define USB_VENDOR_ID_CHUNGHWAT                0x2247
+#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH     0x0001
+
 #define USB_VENDOR_ID_CIDC             0x1677
 
 #define USB_VENDOR_ID_CMEDIA           0x0d8c
 #define USB_VENDOR_ID_UCLOGIC          0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209    0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5      0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60     0x0064
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U   0x0003
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U   0x0004
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U   0x0005
index a5eda4c8127a274ee20343d26a82383135c3048b..0ec91c18a4216a52a4a3c7d85292a22f61a6e828 100644 (file)
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
-       /*
-        * The device reponds with 'invalid report id' when feature
-        * report switching it into multitouch mode is sent to it.
-        *
-        * This results in -EIO from the _raw low-level transport callback,
-        * but there seems to be no other way of switching the mode.
-        * Thus the super-ugly hacky success check below.
-        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != -EIO) {
+       if (ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
index ecd4d2db9e800ca2397c3ad18945d30b2080a028..0b2dcd0ee591f29fa8d1b6e14344c4ed2743cc9a 100644 (file)
@@ -64,6 +64,7 @@ struct mt_device {
        struct mt_class *mtclass;       /* our mt device class */
        unsigned last_field_index;      /* last field index of the report */
        unsigned last_slot_field;       /* the last field of a slot */
+       int last_mt_collection; /* last known mt-related collection */
        __s8 inputmode;         /* InputMode HID feature, -1 if non-existent */
        __u8 num_received;      /* how many contacts we received */
        __u8 num_expected;      /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_X, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_GD_Y:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_Y, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                }
                return 0;
@@ -246,31 +251,40 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        case HID_UP_DIGITIZER:
                switch (usage->hid) {
                case HID_DG_INRANGE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONFIDENCE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPSWITCH:
                        hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
                        input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTID:
                        input_mt_init_slots(hi->input, td->maxcontacts);
                        td->last_slot_field = usage->hid;
                        td->last_field_index = field->index;
+                       td->last_mt_collection = usage->collection_index;
                        return 1;
                case HID_DG_WIDTH:
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_TOUCH_MAJOR);
                        set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
                                cls->sn_width);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_HEIGHT:
                        hid_map_usage(hi, usage, bit, max,
@@ -279,8 +293,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_height);
                        input_set_abs_params(hi->input,
                                        ABS_MT_ORIENTATION, 0, 1, 0, 0);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPPRESSURE:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +308,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_PRESSURE, field,
                                cls->sn_pressure);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTCOUNT:
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return 1;
                case HID_DG_CONTACTMAX:
                        /* we don't set td->last_slot_field as contactcount and
                         * contact max are global to the report */
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return -1;
                }
                /* let hid-input decide for the others */
@@ -516,6 +536,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
        td->mtclass = mtclass;
        td->inputmode = -1;
+       td->last_mt_collection = -1;
        hid_set_drvdata(hdev, td);
 
        ret = hid_parse(hdev);
@@ -593,6 +614,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
                        USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 
+       /* Chunghwa Telecom touch panels */
+       {  .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+                       USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+
        /* CVTouch panels */
        { .driver_data = MT_CLS_DEFAULT,
                HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
index 0e30b140edca173d3fb47b4d2b756ad7c53f9f43..621959d5cc42c6b6798328fe32ce32072fa8669a 100644 (file)
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
index ff3c644888b1ba8b0bf14e2426313b66db3688c0..7c1188b53c3ec0ea4aca4364239d9e3596cb7cfa 100644 (file)
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
                        usbhid_close(list->hiddev->hid);
                        usbhid_put_power(list->hiddev->hid);
                } else {
+                       mutex_unlock(&list->hiddev->existancelock);
                        kfree(list->hiddev);
+                       kfree(list);
+                       return 0;
                }
        }
 
-       kfree(list);
        mutex_unlock(&list->hiddev->existancelock);
+       kfree(list);
 
        return 0;
 }
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
        usb_deregister_dev(usbhid->intf, &hiddev_class);
 
        if (hiddev->open) {
+               mutex_unlock(&hiddev->existancelock);
                usbhid_close(hiddev->hid);
                wake_up_interruptible(&hiddev->wait);
        } else {
+               mutex_unlock(&hiddev->existancelock);
                kfree(hiddev);
        }
-       mutex_unlock(&hiddev->existancelock);
 }
index b5e892017e0c57497f7726d59812a56144f35ea8..dcb78a7a804754956035f2c57e2e1f8673ac45df 100644 (file)
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
 static void atk_init_attribute(struct device_attribute *attr, char *name,
                sysfs_show_func show)
 {
+       sysfs_attr_init(&attr->attr);
        attr->attr.name = name;
        attr->attr.mode = 0444;
        attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
        int err;
 
        list_for_each_entry(s, &data->sensor_list, list) {
-               sysfs_attr_init(&s->input_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->input_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->label_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->label_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit1_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit1_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit2_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit2_attr);
                if (err)
                        return err;
index de3d2465fe24eeabe835b9f76cc34eeb2b106d98..0070d5476dd0b5ee96bba29259c821d5419bf117 100644 (file)
@@ -97,9 +97,7 @@ struct platform_data {
 struct pdev_entry {
        struct list_head list;
        struct platform_device *pdev;
-       unsigned int cpu;
        u16 phys_proc_id;
-       u16 cpu_core_id;
 };
 
 static LIST_HEAD(pdev_list);
@@ -296,7 +294,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
-               if (val > 80 && val < 120) {
+               if (val) {
                        dev_info(dev, "TjMax is %d C.\n", val);
                        return val * 1000;
                }
@@ -304,24 +302,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 
        /*
         * An assumption is made for early CPUs and unreadable MSR.
-        * NOTE: the given value may not be correct.
+        * NOTE: the calculated value may not be correct.
         */
-
-       switch (c->x86_model) {
-       case 0xe:
-       case 0xf:
-       case 0x16:
-       case 0x1a:
-               dev_warn(dev, "TjMax is assumed as 100 C!\n");
-               return 100000;
-       case 0x17:
-       case 0x1c:              /* Atom CPUs */
-               return adjust_tjmax(c, id, dev);
-       default:
-               dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
-                       " using default TjMax of 100C.\n", c->x86_model);
-               return 100000;
-       }
+       return adjust_tjmax(c, id, dev);
 }
 
 static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +324,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
        err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (!err) {
                val = (eax >> 16) & 0xff;
-               if (val > 80 && val < 120)
+               if (val)
                        return val * 1000;
        }
        dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
@@ -668,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        }
 
        pdev_entry->pdev = pdev;
-       pdev_entry->cpu = cpu;
        pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
-       pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
 
        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);
index 537409d07ee730b55e358fcb1e9e6d515d27e1e8..1a409c5bc9bce687922ce5389150599c6e928f39 100644 (file)
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-only sensors */
        while (ro->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = ro->label;
                sensors->dev_attr.attr.mode = S_IRUGO;
                sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-write sensors */
        while (rw->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = rw->label;
                sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
                sensors->dev_attr.show = rw->show;
index 06d4eafcf76b231fdc74e0418341e496387b78d9..41dbf8161ed7b4fb1bc29811c61f006152c9a41c 100644 (file)
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
        else if (type == POWER_SENSOR)
                sprintf(n, power_sensor_name_templates[func], "power", counter);
 
+       sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
        data->sensors[sensor].attr[func].dev_attr.attr.name = n;
        data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
        data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
index 0f9fc40379cd14df869f15f8d8608d9f8393eb19..e855d3b0bd1f1d5e623c913343ae8ec9d6834e01 100644 (file)
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
        if (man_id != 0x4D)
                return -ENODEV;
 
+       /* sanity check */
+       if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
+           || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
+           || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
+               return -ENODEV;
+
        /*
         * We read the config and status register, the 4 lower bits in the
         * config register should be zero and bit 5, 3, 1 and 0 should be
         * zero in the status register.
         */
        reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
+       if ((reg_config & 0x0f) != 0x00)
+               return -ENODEV;
+
+       /* in between, another round of sanity checks */
+       if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
+           || i2c_smbus_read_byte_data(client, 0x06) != reg_config
+           || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
+               return -ENODEV;
+
        reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
-       if (((reg_config & 0x0f) != 0x00) ||
-           ((reg_status & 0x2b) != 0x00))
+       if ((reg_status & 0x2b) != 0x00)
                return -ENODEV;
 
        strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
                            set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
 static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
                            set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp2_max.dev_attr.attr,
 
-       &sensor_dev_attr_temp_fault.dev_attr.attr,
+       &sensor_dev_attr_temp2_fault.dev_attr.attr,
        &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
        &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
        NULL
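Editorial note: the extra detect checks above appear to rely on the MAX6642 echoing the most recently returned value when the probe addresses 0x04, 0x06 and 0xff are read, so comparing those reads against the last real read (manufacturer ID, then the config register) rejects lookalike chips at the same address. A small sketch of that comparison as a helper; the function name is invented for illustration:

#include <linux/i2c.h>

/* Return true if the three probe addresses all echo 'expected', which is
 * what the detect path above checks right after reading the manufacturer
 * ID and again after reading the configuration register. */
static bool max6642_echoes_last_read(struct i2c_client *client, s32 expected)
{
        return i2c_smbus_read_byte_data(client, 0x04) == expected &&
               i2c_smbus_read_byte_data(client, 0x06) == expected &&
               i2c_smbus_read_byte_data(client, 0xff) == expected;
}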
index 92b42db43bcfd9e5706c41bfb9049a26fd967b93..b39f52e2752a7bca54a1bb25c7083382b01aa52e 100644 (file)
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
        attr = &attrs->in;
        attr->index = channel;
+       sysfs_attr_init(&attr->dev_attr.attr);
        attr->dev_attr.attr.name  = attrs->in_name;
        attr->dev_attr.attr.mode  = S_IRUGO;
        attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
                attr = &attrs->label;
                attr->index = channel;
+               sysfs_attr_init(&attr->dev_attr.attr);
                attr->dev_attr.attr.name  = attrs->label_name;
                attr->dev_attr.attr.mode  = S_IRUGO;
                attr->dev_attr.show = s3c_hwmon_label_show;
index 144d27261e43396161f0b0b196cbfc74bdccd285..04b09564bfa902bf84f6a313594112e74a1db2c6 100644 (file)
@@ -778,7 +778,8 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
                                        sector_t block)
 {
        struct ide_cmd cmd;
-       int uptodate = 0, nsectors;
+       int uptodate = 0;
+       unsigned int nsectors;
 
        ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
                                  rq->cmd[0], (unsigned long long)block);
index f3698967edf6bdd0e8b7f4b19abdb19e335a2c3d..8755f5f3ad37c2218de60b96782c0863615a705e 100644 (file)
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
  * 'interrupt' routine.
  */
 
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
        struct serport *serport = (struct serport*) tty->disc_data;
        unsigned long flags;
        unsigned int ch_flags;
-       int ret = 0;
        int i;
 
        spin_lock_irqsave(&serport->lock, flags);
 
-       if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
-               ret = -EINVAL;
+       if (!test_bit(SERPORT_ACTIVE, &serport->flags))
                goto out;
-       }
 
        for (i = 0; i < count; i++) {
                switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
 
 out:
        spin_unlock_irqrestore(&serport->lock, flags);
-
-       return ret == 0 ? count : ret;
 }
 
 /*
index 1d44d470897ca4660b6e4f4e05218293f3e9c398..86a5c4f7775eb5c95a23a35b1bf05a77a8ba9681 100644 (file)
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
  *     cflags  buffer containing error flags for received characters (ignored)
  *     count   number of received characters
  */
-static unsigned int
+static void
 gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
                    char *cflags, int count)
 {
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
        struct inbuf_t *inbuf;
 
        if (!cs)
-               return -ENODEV;
+               return;
        inbuf = cs->inbuf;
        if (!inbuf) {
                dev_err(cs->dev, "%s: no inbuf\n", __func__);
                cs_put(cs);
-               return -EINVAL;
+               return;
        }
 
        tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
        gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
        gigaset_schedule_event(cs);
        cs_put(cs);
-
-       return count;
 }
 
 /*
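Editorial note: the serport and gigaset hunks above (and the ti-st hunk further down) adapt to the tty line discipline ->receive_buf() callback returning void instead of a consumed-byte count, so error paths simply bail out rather than return a negative value. A hedged sketch of the new handler shape; the port type and feed helper are hypothetical:

/* Sketch only; assumes <linux/tty.h> and a driver-private port structure. */
static void example_ldisc_receive(struct tty_struct *tty,
                                  const unsigned char *cp, char *fp, int count)
{
        struct example_port *port = tty->disc_data;

        if (!port)
                return;                 /* previously: return -ENODEV */

        example_feed(port, cp, count);  /* all 'count' bytes are consumed */
}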
index 3ccbff13eaf238541beb0564542db9a647cbdd8b..71a8eb6ef71ead7efca9b247f205aef4cca86817 100644 (file)
@@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
        _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
                sizeof(struct ph_info_dch) + dch->dev.nrbchan *
                sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+       kfree(phi);
 }
 
 /*
index 23f0d5e99f35705c97c165e05b05a7e5239804b7..713d43b4e563396f419e9583af58b6a4ed636290 100644 (file)
@@ -1,3 +1,10 @@
+config LEDS_GPIO_REGISTER
+       bool
+       help
+         This option provides the function gpio_led_register_device.
+         As this function is used by arch code it must not be compiled as a
+         module.
+
 menuconfig NEW_LEDS
        bool "LED Support"
        help
@@ -7,22 +14,14 @@ menuconfig NEW_LEDS
          This is not related to standard keyboard LEDs which are controlled
          via the input system.
 
+if NEW_LEDS
+
 config LEDS_CLASS
        bool "LED Class Support"
-       depends on NEW_LEDS
        help
          This option enables the led sysfs class in /sys/class/leds.  You'll
          need this to do anything useful with LEDs.  If unsure, say N.
 
-config LEDS_GPIO_REGISTER
-       bool
-       help
-         This option provides the function gpio_led_register_device.
-         As this function is used by arch code it must not be compiled as a
-         module.
-
-if NEW_LEDS
-
 comment "LED drivers"
 
 config LEDS_88PM860X
@@ -391,6 +390,7 @@ config LEDS_NETXBIG
 
 config LEDS_ASIC3
        bool "LED support for the HTC ASIC3"
+       depends on LEDS_CLASS
        depends on MFD_ASIC3
        default y
        help
index 70bd738b8b99eff89185b8dee59f5a4f6599e4d3..574b09afedd32ff1f8f30ddefab722fb01edd17d 100644 (file)
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
        kunmap_atomic(sb, KM_USER0);
 }
 
+/*
+ * bitmap_new_disk_sb
+ * @bitmap
+ *
+ * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
+ * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
+ * This function verifies 'bitmap_info' and populates the on-disk bitmap
+ * structure, which is to be written to disk.
+ *
+ * Returns: 0 on success, -Exxx on error
+ */
+static int bitmap_new_disk_sb(struct bitmap *bitmap)
+{
+       bitmap_super_t *sb;
+       unsigned long chunksize, daemon_sleep, write_behind;
+       int err = -EINVAL;
+
+       bitmap->sb_page = alloc_page(GFP_KERNEL);
+       if (IS_ERR(bitmap->sb_page)) {
+               err = PTR_ERR(bitmap->sb_page);
+               bitmap->sb_page = NULL;
+               return err;
+       }
+       bitmap->sb_page->index = 0;
+
+       sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+
+       sb->magic = cpu_to_le32(BITMAP_MAGIC);
+       sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
+
+       chunksize = bitmap->mddev->bitmap_info.chunksize;
+       BUG_ON(!chunksize);
+       if (!is_power_of_2(chunksize)) {
+               kunmap_atomic(sb, KM_USER0);
+               printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+               return -EINVAL;
+       }
+       sb->chunksize = cpu_to_le32(chunksize);
+
+       daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
+       if (!daemon_sleep ||
+           (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+               printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+               daemon_sleep = 5 * HZ;
+       }
+       sb->daemon_sleep = cpu_to_le32(daemon_sleep);
+       bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+       /*
+        * FIXME: write_behind for RAID1.  If not specified, what
+        * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
+        */
+       write_behind = bitmap->mddev->bitmap_info.max_write_behind;
+       if (write_behind > COUNTER_MAX)
+               write_behind = COUNTER_MAX / 2;
+       sb->write_behind = cpu_to_le32(write_behind);
+       bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+
+       /* keep the array size field of the bitmap superblock up to date */
+       sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
+
+       memcpy(sb->uuid, bitmap->mddev->uuid, 16);
+
+       bitmap->flags |= BITMAP_STALE;
+       sb->state |= cpu_to_le32(BITMAP_STALE);
+       bitmap->events_cleared = bitmap->mddev->events;
+       sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+
+       bitmap->flags |= BITMAP_HOSTENDIAN;
+       sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
+
+       kunmap_atomic(sb, KM_USER0);
+
+       return 0;
+}
+
 /* read the superblock from the bitmap file and initialize some bitmap fields */
 static int bitmap_read_sb(struct bitmap *bitmap)
 {
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
                reason = "unrecognized superblock version";
        else if (chunksize < 512)
                reason = "bitmap chunksize too small";
-       else if ((1 << ffz(~chunksize)) != chunksize)
+       else if (!is_power_of_2(chunksize))
                reason = "bitmap chunksize not a power of 2";
        else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
                reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
        }
 
        printk(KERN_INFO "%s: bitmap initialized from disk: "
-               "read %lu/%lu pages, set %lu bits\n",
-               bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+              "read %lu/%lu pages, set %lu of %lu bits\n",
+              bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
 
        return 0;
 
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                        return 0;
                }
 
-               if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+               if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
                        DEFINE_WAIT(__wait);
                        /* note that it is safe to do the prepare_to_wait
                         * after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
                }
 
-               if (!success && ! (*bmc & NEEDED_MASK))
+               if (!success && !NEEDED(*bmc))
                        *bmc |= NEEDED_MASK;
 
-               if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+               if (COUNTER(*bmc) == COUNTER_MAX)
                        wake_up(&bitmap->overflow_wait);
 
                (*bmc)--;
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
                vfs_fsync(file, 1);
        }
        /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
-       if (!mddev->bitmap_info.external)
-               err = bitmap_read_sb(bitmap);
-       else {
+       if (!mddev->bitmap_info.external) {
+               /*
+                * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
+                * instructing us to create a new on-disk bitmap instance.
+                */
+               if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
+                       err = bitmap_new_disk_sb(bitmap);
+               else
+                       err = bitmap_read_sb(bitmap);
+       } else {
                err = 0;
                if (mddev->bitmap_info.chunksize == 0 ||
                    mddev->bitmap_info.daemon_sleep == 0)
@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
        bitmap->chunks = chunks;
        bitmap->pages = pages;
        bitmap->missing_pages = pages;
-       bitmap->counter_bits = COUNTER_BITS;
-
-       bitmap->syncchunk = ~0UL;
 
 #ifdef INJECT_FATAL_FAULT_1
        bitmap->bp = NULL;
index d0aeaf46d932017505e4b3728cfa1ca3c9e6fe3c..b2a127e891acedc9843ff4b61e1e8c62b2456a48 100644 (file)
@@ -85,7 +85,6 @@
 typedef __u16 bitmap_counter_t;
 #define COUNTER_BITS 16
 #define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
 #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
 
 #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
 
        mddev_t *mddev; /* the md device that the bitmap is for */
 
-       int counter_bits; /* how many bits per block counter */
-
        /* bitmap chunksize -- how much data does each bit represent? */
        unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
        unsigned long chunks; /* total number of data chunks for the array */
 
-       /* We hold a count on the chunk currently being synced, and drop
-        * it when the last block is started.  If the resync is aborted
-        * midway, we need to be able to drop that count, so we remember
-        * the counted chunk..
-        */
-       unsigned long syncchunk;
-
        __u64   events_cleared;
        int need_sync;
 
index aa640a85bb2169aa14fb8d9875beb0ac9091192c..4332fc2f25d4a2260b575f66229514bad10a2fa4 100644 (file)
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
        mddev->suspended = 0;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);
+
+       md_wakeup_thread(mddev->thread);
+       md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 }
 EXPORT_SYMBOL_GPL(mddev_resume);
 
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
        },
 };
 
+static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+       if (mddev->sync_super) {
+               mddev->sync_super(mddev, rdev);
+               return;
+       }
+
+       BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
+
+       super_types[mddev->major_version].sync_super(mddev, rdev);
+}
+
 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 {
        mdk_rdev_t *rdev, *rdev2;
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
 
        if (list_empty(&mddev->disks))
                return 0; /* nothing to do */
-       if (blk_get_integrity(mddev->gendisk))
-               return 0; /* already registered */
+       if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+               return 0; /* shouldn't register, or already is */
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                /* skip spares and non-functional disks */
                if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
                        /* Don't update this superblock */
                        rdev->sb_loaded = 2;
                } else {
-                       super_types[mddev->major_version].
-                               sync_super(mddev, rdev);
+                       sync_super(mddev, rdev);
                        rdev->sb_loaded = 1;
                }
        }
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                if (rdev->raid_disk == -1)
                        return -EEXIST;
                /* personality does all needed checks */
-               if (rdev->mddev->pers->hot_add_disk == NULL)
+               if (rdev->mddev->pers->hot_remove_disk == NULL)
                        return -EINVAL;
                err = rdev->mddev->pers->
                        hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
        if (mddev->flags)
                md_update_sb(mddev, 0);
 
-       md_wakeup_thread(mddev->thread);
-       md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
-
        md_new_event(mddev);
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
                bitmap_destroy(mddev);
                goto out;
        }
+
+       md_wakeup_thread(mddev->thread);
+       md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
+
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
        mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                if (mddev->degraded)
                        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               if (!err)
+                       md_new_event(mddev);
                md_wakeup_thread(mddev->thread);
                return err;
        }
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
         * Tune reconstruction:
         */
        window = 32*(PAGE_SIZE/512);
-       printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
-               window/2,(unsigned long long) max_sectors/2);
+       printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
+               window/2, (unsigned long long)max_sectors/2);
 
        atomic_set(&mddev->recovery_active, 0);
        last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_do_sync);
 
-
 static int remove_and_add_spares(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev)
  */
 void md_check_recovery(mddev_t *mddev)
 {
+       if (mddev->suspended)
+               return;
+
        if (mddev->bitmap)
                bitmap_daemon_work(mddev);
 
index 0b1fd3f1d85b5decfaaba4ab8b70595fffbaba1f..1c26c7a08ae6c053524cbee6c751c84935c5cfe3 100644 (file)
@@ -124,6 +124,7 @@ struct mddev_s
 #define MD_CHANGE_DEVS 0       /* Some device status has changed */
 #define MD_CHANGE_CLEAN 1      /* transition to or from 'clean' */
 #define MD_CHANGE_PENDING 2    /* switch from 'clean' to 'active' in progress */
+#define MD_ARRAY_FIRST_USE 3    /* First use of array, needs initialization */
 
        int                             suspended;
        atomic_t                        active_io;
@@ -330,6 +331,7 @@ struct mddev_s
        atomic_t flush_pending;
        struct work_struct flush_work;
        struct work_struct event_work;  /* used by dm to report failure event */
+       void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
 };
 
 
index 5d096096f9584972a9da2197ed1bef9303edf95f..f7431b6d8447df5979c0b80a5ded841f61a16cc1 100644 (file)
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
        return best_disk;
 }
 
-static int raid1_congested(void *data, int bits)
+int md_raid1_congested(mddev_t *mddev, int bits)
 {
-       mddev_t *mddev = data;
        conf_t *conf = mddev->private;
        int i, ret = 0;
 
-       if (mddev_congested(mddev, bits))
-               return 1;
-
        rcu_read_lock();
        for (i = 0; i < mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 
+                       BUG_ON(!q);
+
                        /* Note the '|| 1' - when read_balance prefers
                         * non-congested targets, it can be removed
                         */
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
        rcu_read_unlock();
        return ret;
 }
+EXPORT_SYMBOL_GPL(md_raid1_congested);
 
+static int raid1_congested(void *data, int bits)
+{
+       mddev_t *mddev = data;
+
+       return mddev_congested(mddev, bits) ||
+               md_raid1_congested(mddev, bits);
+}
 
 static void flush_pending_writes(conf_t *conf)
 {
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
                return PTR_ERR(conf);
 
        list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (!mddev->gendisk)
+                       continue;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
 
        md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-       mddev->queue->backing_dev_info.congested_fn = raid1_congested;
-       mddev->queue->backing_dev_info.congested_data = mddev;
+       if (mddev->queue) {
+               mddev->queue->backing_dev_info.congested_fn = raid1_congested;
+               mddev->queue->backing_dev_info.congested_data = mddev;
+       }
        return md_integrity_register(mddev);
 }
 
index 5fc4ca1af8639b5a61a6a17901866fe0987effcc..e743a64fac4f10f2dbc27f2c194658fdd35f3fa7 100644 (file)
@@ -126,4 +126,6 @@ struct r1bio_s {
  */
 #define        R1BIO_Returned 6
 
+extern int md_raid1_congested(mddev_t *mddev, int bits);
+
 #endif
index 346e69bfdab3f8b0b744f6b239c93ea0f3f099e2..b72edf35ec544d0da66346107cededa933ed20dc 100644 (file)
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 
 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 {
-       bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
+       bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 }
 
 /* Find first data disk in a raid6 stripe */
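Editorial note: the one-character fix above matters because logical OR collapses its result to 0 or 1, so the count being shifted into the upper 16 bits was thrown away. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int segments = 3, cnt = 5;

        printf("0x%x\n", segments || (cnt << 16));      /* 0x1: count lost */
        printf("0x%x\n", segments | (cnt << 16));       /* 0x50003: packed */
        return 0;
}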
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                bi = &sh->dev[i].req;
 
                bi->bi_rw = rw;
-               if (rw == WRITE)
+               if (rw & WRITE)
                        bi->bi_end_io = raid5_end_write_request;
                else
                        bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
                        bi->bi_next = NULL;
-                       if (rw == WRITE &&
+                       if ((rw & WRITE) &&
                            test_bit(R5_ReWrite, &sh->dev[i].flags))
                                atomic_add(STRIPE_SECTORS,
                                        &rdev->corrected_errors);
                        generic_make_request(bi);
                } else {
-                       if (rw == WRITE)
+                       if (rw & WRITE)
                                set_bit(STRIPE_DEGRADED, &sh->state);
                        pr_debug("skip op %ld on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
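Editorial note: the hunk above replaces equality tests against WRITE with bit tests because the request direction word can carry additional flag bits, and any of them makes `rw == WRITE` false for what is still a write. An illustrative standalone snippet (the extra flag value here is made up):

#include <stdio.h>

#define WRITE           0x1
#define EXTRA_FLAG      0x10    /* stand-in for a per-request flag bit */

int main(void)
{
        unsigned long rw = WRITE | EXTRA_FLAG;

        printf("%d\n", rw == WRITE);    /* 0: flagged write missed  */
        printf("%d\n", !!(rw & WRITE)); /* 1: still seen as a write */
        return 0;
}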
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 
        bio_for_each_segment(bvl, bio, i) {
-               int len = bio_iovec_idx(bio, i)->bv_len;
+               int len = bvl->bv_len;
                int clen;
                int b_offset = 0;
 
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        clen = len;
 
                if (clen > 0) {
-                       b_offset += bio_iovec_idx(bio, i)->bv_offset;
-                       bio_page = bio_iovec_idx(bio, i)->bv_page;
+                       b_offset += bvl->bv_offset;
+                       bio_page = bvl->bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                        printk(KERN_INFO "md/raid:%s: device %s operational as raid"
                               " disk %d\n",
                               mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
-               } else
+               } else if (rdev->saved_raid_disk != raid_disk)
                        /* Cannot rely on bitmap to complete recovery */
                        conf->fullsync = 1;
        }
index 4dc1ca3332363e7569f34e08a034f13186f677fd..7c327b54308e30312848d7b9c4c289f28fe2d937 100644 (file)
@@ -60,8 +60,6 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
        int act_len, ret;
        u8 buf[64];
 
-       if (slen > sizeof(buf))
-               slen = sizeof(buf);
        memcpy(&buf[0], sbuf, slen);
        buf[60] = state->seq++;
 
@@ -180,30 +178,37 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
 {
        struct dvb_usb_device *d = i2c_get_adapdata(adap);
        int ret = 0, inc, i = 0;
+       u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */
 
        if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
                return -EAGAIN;
 
        while (i < num) {
                if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
-                       u8 buf[6];
+                       if (msg[i].len > 2 || msg[i+1].len > 60) {
+                               ret = -EOPNOTSUPP;
+                               break;
+                       }
                        buf[0] = CMD_I2C_READ;
                        buf[1] = (msg[i].addr << 1) | 0x01;
                        buf[2] = msg[i].buf[0];
                        buf[3] = msg[i].buf[1];
                        buf[4] = msg[i].len-1;
                        buf[5] = msg[i+1].len;
-                       ret = anysee_ctrl_msg(d, buf, sizeof(buf), msg[i+1].buf,
+                       ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
                                msg[i+1].len);
                        inc = 2;
                } else {
-                       u8 buf[4+msg[i].len];
+                       if (msg[i].len > 48) {
+                               ret = -EOPNOTSUPP;
+                               break;
+                       }
                        buf[0] = CMD_I2C_WRITE;
                        buf[1] = (msg[i].addr << 1);
                        buf[2] = msg[i].len;
                        buf[3] = 0x01;
                        memcpy(&buf[4], msg[i].buf, msg[i].len);
-                       ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
+                       ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
                        inc = 1;
                }
                if (ret)
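Editorial note: the rewritten transfer loop above sizes one stack buffer for the worst case (4-byte command header plus a 48-byte I2C write) and rejects oversized messages with -EOPNOTSUPP up front, instead of letting anysee_ctrl_msg() silently truncate. A hedged sketch of the same bound-then-copy shape; the helper name and command byte are placeholders:

#include <string.h>
#include <errno.h>

#define HDR_LEN         4
#define MAX_WR_PAYLOAD  48

static int build_i2c_write_cmd(unsigned char *buf /* >= 52 bytes */,
                               unsigned char addr,
                               const unsigned char *data, int len)
{
        if (len > MAX_WR_PAYLOAD)
                return -EOPNOTSUPP;

        buf[0] = 0x51;                  /* placeholder for CMD_I2C_WRITE */
        buf[1] = addr << 1;
        buf[2] = len;
        buf[3] = 0x01;
        memcpy(&buf[HDR_LEN], data, len);
        return HDR_LEN + len;           /* bytes to hand to the USB helper */
}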
index af5263c6625adeafab44bfa5cdff167122ff52d5..7b42ace419d9378c32407650044b2daa69adc0a6 100644 (file)
@@ -213,14 +213,14 @@ int __must_check media_devnode_register(struct media_devnode *mdev)
 
        /* Part 1: Find a free minor number */
        mutex_lock(&media_devnode_lock);
-       minor = find_next_zero_bit(media_devnode_nums, 0, MEDIA_NUM_DEVICES);
+       minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
        if (minor == MEDIA_NUM_DEVICES) {
                mutex_unlock(&media_devnode_lock);
                printk(KERN_ERR "could not get a free minor\n");
                return -ENFILE;
        }
 
-       set_bit(mdev->minor, media_devnode_nums);
+       set_bit(minor, media_devnode_nums);
        mutex_unlock(&media_devnode_lock);
 
        mdev->minor = minor;
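Editorial note: the minor-allocation fix above is about argument order: find_next_zero_bit() takes (addr, size, offset), so the old call searched a zero-length range, and set_bit() then operated on mdev->minor, which is only assigned afterwards. A hedged kernel-style sketch of the corrected pattern; the bitmap and helper names are illustrative:

/* Sketch only; assumes <linux/bitops.h> and the driver's MEDIA_NUM_DEVICES. */
static DECLARE_BITMAP(minor_map, MEDIA_NUM_DEVICES);

static int alloc_media_minor(void)
{
        int minor = find_next_zero_bit(minor_map, MEDIA_NUM_DEVICES, 0);

        if (minor == MEDIA_NUM_DEVICES)
                return -ENFILE;         /* no free minor left */
        set_bit(minor, minor_map);      /* reserve the one we found */
        return minor;
}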
index 2354336862cf7f0d0f1203093295dae5fc77c423..934185cca758d7461e3bc96bead9f64db90625c0 100644 (file)
@@ -25,8 +25,8 @@
 #include <linux/delay.h>
 #include <media/cx25840.h>
 #include <linux/firmware.h>
-#include <staging/altera.h>
 
+#include "../../../staging/altera-stapl/altera.h"
 #include "cx23885.h"
 #include "tuner-xc2028.h"
 #include "netup-init.h"
diff --git a/drivers/media/video/gspca/coarse_expo_autogain.h b/drivers/media/video/gspca/coarse_expo_autogain.h
deleted file mode 100644 (file)
index 1cb9d94..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Auto gain algorithm for camera's with a coarse exposure control
- *
- * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-/* Autogain + exposure algorithm for cameras with a coarse exposure control
-   (usually this means we can only control the clockdiv to change exposure)
-   As changing the clockdiv so that the fps drops from 30 to 15 fps for
-   example, will lead to a huge exposure change (it effectively doubles),
-   this algorithm normally tries to only adjust the gain (between 40 and
-   80 %) and if that does not help, only then changes exposure. This leads
-   to a much more stable image then using the knee algorithm which at
-   certain points of the knee graph will only try to adjust exposure,
-   which leads to oscilating as one exposure step is huge.
-
-   Note this assumes that the sd struct for the cam in question has
-   exp_too_high_cnt and exp_too_high_cnt int members for use by this function.
-
-   Returns 0 if no changes were made, 1 if the gain and or exposure settings
-   where changed. */
-static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
-       int avg_lum, int desired_avg_lum, int deadzone)
-{
-       int i, steps, gain, orig_gain, exposure, orig_exposure;
-       int gain_low, gain_high;
-       const struct ctrl *gain_ctrl = NULL;
-       const struct ctrl *exposure_ctrl = NULL;
-       struct sd *sd = (struct sd *) gspca_dev;
-       int retval = 0;
-
-       for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
-               if (gspca_dev->ctrl_dis & (1 << i))
-                       continue;
-               if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
-                       gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
-               if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
-                       exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
-       }
-       if (!gain_ctrl || !exposure_ctrl) {
-               PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain "
-                       "called on cam without gain or exposure");
-               return 0;
-       }
-
-       if (gain_ctrl->get(gspca_dev, &gain) ||
-           exposure_ctrl->get(gspca_dev, &exposure))
-               return 0;
-
-       orig_gain = gain;
-       orig_exposure = exposure;
-       gain_low =
-               (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2;
-       gain_low += gain_ctrl->qctrl.minimum;
-       gain_high =
-               (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4;
-       gain_high += gain_ctrl->qctrl.minimum;
-
-       /* If we are of a multiple of deadzone, do multiple steps to reach the
-          desired lumination fast (with the risc of a slight overshoot) */
-       steps = (desired_avg_lum - avg_lum) / deadzone;
-
-       PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
-               avg_lum, desired_avg_lum, steps);
-
-       if ((gain + steps) > gain_high &&
-           sd->exposure < exposure_ctrl->qctrl.maximum) {
-               gain = gain_high;
-               sd->exp_too_low_cnt++;
-       } else if ((gain + steps) < gain_low &&
-                  sd->exposure > exposure_ctrl->qctrl.minimum) {
-               gain = gain_low;
-               sd->exp_too_high_cnt++;
-       } else {
-               gain += steps;
-               if (gain > gain_ctrl->qctrl.maximum)
-                       gain = gain_ctrl->qctrl.maximum;
-               else if (gain < gain_ctrl->qctrl.minimum)
-                       gain = gain_ctrl->qctrl.minimum;
-               sd->exp_too_high_cnt = 0;
-               sd->exp_too_low_cnt = 0;
-       }
-
-       if (sd->exp_too_high_cnt > 3) {
-               exposure--;
-               sd->exp_too_high_cnt = 0;
-       } else if (sd->exp_too_low_cnt > 3) {
-               exposure++;
-               sd->exp_too_low_cnt = 0;
-       }
-
-       if (gain != orig_gain) {
-               gain_ctrl->set(gspca_dev, gain);
-               retval = 1;
-       }
-       if (exposure != orig_exposure) {
-               exposure_ctrl->set(gspca_dev, exposure);
-               retval = 1;
-       }
-
-       return retval;
-}
index 36a46fc787341b86dbd8ceaa1fef5c1ab21541b7..057e287b9152a78fea793bdefa8ec6d26b699085 100644 (file)
@@ -609,7 +609,7 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
  * buffers, there are some pretty strict real time constraints for
  * isochronous transfer for larger frame sizes).
  */
-/*jfm: this value works well for 1600x1200, but not 800x600 - see isoc_init */
+/*jfm: this value does not work for 800x600 - see isoc_init */
 #define OVFX2_BULK_SIZE (13 * 4096)
 
 /* I2C registers */
@@ -3307,6 +3307,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
 
        gspca_dev->cam.ctrls = sd->ctrls;
        sd->quality = QUALITY_DEF;
+       sd->frame_rate = 15;
 
        return 0;
 }
@@ -3469,7 +3470,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
                                ARRAY_SIZE(init_519_ov7660));
                write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
                sd->gspca_dev.curr_mode = 1;    /* 640x480 */
-               sd->frame_rate = 15;
                ov519_set_mode(sd);
                ov519_set_fr(sd);
                sd->ctrls[COLORS].max = 4;      /* 0..4 */
@@ -3511,7 +3511,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 
        switch (sd->bridge) {
        case BRIDGE_OVFX2:
-               if (gspca_dev->width == 1600)
+               if (gspca_dev->width != 800)
                        gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
                else
                        gspca_dev->cam.bulk_size = 7 * 4096;
@@ -4478,7 +4478,7 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
        gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
 
        /* A short read signals EOF */
-       if (len < OVFX2_BULK_SIZE) {
+       if (len < gspca_dev->cam.bulk_size) {
                /* If the frame is short, and it is one of the first ones
                   the sensor and bridge are still syncing, so drop it. */
                if (sd->first_frame) {
index 6415aff5cbd1675cb4f96d106b9eb72b78b8977b..81b8a600783b575290814af47931ee5b1702ebcf 100644 (file)
@@ -60,7 +60,7 @@ struct sd {
 
        u32 pktsz;                      /* (used by pkt_scan) */
        u16 npkt;
-       u8 nchg;
+       s8 nchg;
        s8 short_mark;
 
        u8 quality;                     /* image quality */
index b538dce96f78d8cbb1c7297ea8b40ce21712ea7b..a14a84a5079b96d4da4b932d206d99b4d1a9532e 100644 (file)
 #define HDCS_SLEEP_MODE                (1 << 1)
 
 #define HDCS_DEFAULT_EXPOSURE  48
-#define HDCS_DEFAULT_GAIN      128
+#define HDCS_DEFAULT_GAIN      50
 
 static int hdcs_probe_1x00(struct sd *sd);
 static int hdcs_probe_1020(struct sd *sd);
index a4e4dfdbc2f2c9474c5616b9b68f44abb762dd75..0fb75524484d909af4925c3c33c9f12cf6d6519e 100644 (file)
@@ -1328,6 +1328,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
        if (!itv->has_cx23415)
                write_reg_sync(0x03, IVTV_REG_DMACONTROL);
 
+       ivtv_s_std_enc(itv, &itv->tuner_std);
+
        /* Default interrupts enabled. For the PVR350 this includes the
           decoder VSYNC interrupt, which is always on. It is not only used
           during decoding but also by the OSD.
@@ -1336,12 +1338,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
        if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
                ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
                ivtv_set_osd_alpha(itv);
-       }
-       else
+               ivtv_s_std_dec(itv, &itv->tuner_std);
+       } else {
                ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
-
-       /* For cards with video out, this call needs interrupts enabled */
-       ivtv_s_std(NULL, &fh, &itv->tuner_std);
+       }
 
        /* Setup initial controls */
        cx2341x_handler_setup(&itv->cxhdl);
index 14a1cea1d70da6bdcefc06184d4153f804348de8..02c5adebf517aac57248c473a60c793952629e75 100644 (file)
@@ -280,8 +280,6 @@ int ivtv_firmware_restart(struct ivtv *itv)
 {
        int rc = 0;
        v4l2_std_id std;
-       struct ivtv_open_id fh;
-       fh.itv = itv;
 
        if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
                /* Display test image during restart */
@@ -301,14 +299,19 @@ int ivtv_firmware_restart(struct ivtv *itv)
        /* Allow settings to reload */
        ivtv_mailbox_cache_invalidate(itv);
 
-       /* Restore video standard */
+       /* Restore encoder video standard */
        std = itv->std;
        itv->std = 0;
-       ivtv_s_std(NULL, &fh, &std);
+       ivtv_s_std_enc(itv, &std);
 
        if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
                ivtv_init_mpeg_decoder(itv);
 
+               /* Restore decoder video standard */
+               std = itv->std_out;
+               itv->std_out = 0;
+               ivtv_s_std_dec(itv, &std);
+
                /* Restore framebuffer if active */
                if (itv->ivtvfb_restore)
                        itv->ivtvfb_restore(itv);
index 1689783cd19aa81261147b35870a485eb16736aa..f9e347dae7391a4487e15ac17e9e16578f59d16e 100644 (file)
@@ -1071,28 +1071,8 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
        return 0;
 }
 
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
 {
-       DEFINE_WAIT(wait);
-       struct ivtv *itv = fh2id(fh)->itv;
-       struct yuv_playback_info *yi = &itv->yuv_info;
-       int f;
-
-       if ((*std & V4L2_STD_ALL) == 0)
-               return -EINVAL;
-
-       if (*std == itv->std)
-               return 0;
-
-       if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
-           atomic_read(&itv->capturing) > 0 ||
-           atomic_read(&itv->decoding) > 0) {
-               /* Switching standard would turn off the radio or mess
-                  with already running streams, prevent that by
-                  returning EBUSY. */
-               return -EBUSY;
-       }
-
        itv->std = *std;
        itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
        itv->is_50hz = !itv->is_60hz;
@@ -1106,48 +1086,79 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
        if (itv->hw_flags & IVTV_HW_CX25840)
                itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
 
-       IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);
-
        /* Tuner */
        ivtv_call_all(itv, core, s_std, itv->std);
+}
 
-       if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
-               /* set display standard */
-               itv->std_out = *std;
-               itv->is_out_60hz = itv->is_60hz;
-               itv->is_out_50hz = itv->is_50hz;
-               ivtv_call_all(itv, video, s_std_output, itv->std_out);
-
-               /*
-                * The next firmware call is time sensitive. Time it to
-                * avoid risk of a hard lock, by trying to ensure the call
-                * happens within the first 100 lines of the top field.
-                * Make 4 attempts to sync to the decoder before giving up.
-                */
-               for (f = 0; f < 4; f++) {
-                       prepare_to_wait(&itv->vsync_waitq, &wait,
-                                       TASK_UNINTERRUPTIBLE);
-                       if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
-                               break;
-                       schedule_timeout(msecs_to_jiffies(25));
-               }
-               finish_wait(&itv->vsync_waitq, &wait);
-
-               if (f == 4)
-                       IVTV_WARN("Mode change failed to sync to decoder\n");
-
-               ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
-               itv->main_rect.left = itv->main_rect.top = 0;
-               itv->main_rect.width = 720;
-               itv->main_rect.height = itv->cxhdl.height;
-               ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
-                       720, itv->main_rect.height, 0, 0);
-               yi->main_rect = itv->main_rect;
-               if (!itv->osd_info) {
-                       yi->osd_full_w = 720;
-                       yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
-               }
+void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
+{
+       struct yuv_playback_info *yi = &itv->yuv_info;
+       DEFINE_WAIT(wait);
+       int f;
+
+       /* set display standard */
+       itv->std_out = *std;
+       itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
+       itv->is_out_50hz = !itv->is_out_60hz;
+       ivtv_call_all(itv, video, s_std_output, itv->std_out);
+
+       /*
+        * The next firmware call is time sensitive. Time it to
+        * avoid risk of a hard lock, by trying to ensure the call
+        * happens within the first 100 lines of the top field.
+        * Make 4 attempts to sync to the decoder before giving up.
+        */
+       for (f = 0; f < 4; f++) {
+               prepare_to_wait(&itv->vsync_waitq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+                       break;
+               schedule_timeout(msecs_to_jiffies(25));
        }
+       finish_wait(&itv->vsync_waitq, &wait);
+
+       if (f == 4)
+               IVTV_WARN("Mode change failed to sync to decoder\n");
+
+       ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
+       itv->main_rect.left = 0;
+       itv->main_rect.top = 0;
+       itv->main_rect.width = 720;
+       itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
+       ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
+               720, itv->main_rect.height, 0, 0);
+       yi->main_rect = itv->main_rect;
+       if (!itv->osd_info) {
+               yi->osd_full_w = 720;
+               yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
+       }
+}
+
+int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+       struct ivtv *itv = fh2id(fh)->itv;
+
+       if ((*std & V4L2_STD_ALL) == 0)
+               return -EINVAL;
+
+       if (*std == itv->std)
+               return 0;
+
+       if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
+           atomic_read(&itv->capturing) > 0 ||
+           atomic_read(&itv->decoding) > 0) {
+               /* Switching standard would mess with already running
+                  streams, prevent that by returning EBUSY. */
+               return -EBUSY;
+       }
+
+       IVTV_DEBUG_INFO("Switching standard to %llx.\n",
+               (unsigned long long)itv->std);
+
+       ivtv_s_std_enc(itv, std);
+       if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
+               ivtv_s_std_dec(itv, std);
+
        return 0;
 }
 
index 58f003412afdd5141a854c41779b80ad38a4884d..89185caeafae0a5c72c8423d6b3ad6b242947227 100644 (file)
@@ -27,7 +27,8 @@ u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
 void ivtv_set_osd_alpha(struct ivtv *itv);
 int ivtv_set_speed(struct ivtv *itv, int speed);
 void ivtv_set_funcs(struct video_device *vdev);
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
+void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
+void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
 int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
 int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
 long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
index 942683336555e02311c6d0b1d63acdab633fc777..e7794dc1330e50348a5e358714abf9f54702e56d 100644 (file)
@@ -589,7 +589,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
                v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
                /* Avoid unpredictable PCI bus hang - disable video clocks */
                v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
-               ivtv_msleep_timeout(300, 1);
+               ivtv_msleep_timeout(300, 0);
                ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
                v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
        }
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
                }
 
                /* Handle any pending interrupts */
-               ivtv_msleep_timeout(100, 1);
+               ivtv_msleep_timeout(100, 0);
        }
 
        atomic_dec(&itv->capturing);
index b6eb51ce773503ad60031b999934253edbc39d67..293db806d9362b688f000ed90ed3475c0b85ae09 100644 (file)
@@ -71,7 +71,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
           Turning this signal on and off can confuse certain
           TVs. As far as I can tell there is no reason not to
           transmit this signal. */
-       if ((itv->std & V4L2_STD_625_50) && !enabled) {
+       if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
                enabled = 1;
                mode = 0x08;  /* 4x3 full format */
        }
index 17247451c69326f3159cb6913e7eabaa152d72fc..6b7c9c8233309975726e42a65f97296df84cff54 100644 (file)
@@ -247,7 +247,7 @@ static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords
 
 static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
 {
-       int osd_height_limit = itv->is_50hz ? 576 : 480;
+       int osd_height_limit = itv->is_out_50hz ? 576 : 480;
 
        /* Only fail if resolution too high, otherwise fudge the start coords. */
        if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
@@ -471,9 +471,9 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
                        vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
                                        FB_VBLANK_HAVE_VSYNC;
                        trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
-                       if (itv->is_50hz && trace > 312)
+                       if (itv->is_out_50hz && trace > 312)
                                trace -= 312;
-                       else if (itv->is_60hz && trace > 262)
+                       else if (itv->is_out_60hz && trace > 262)
                                trace -= 262;
                        if (trace == 1)
                                vblank.flags |= FB_VBLANK_VSYNCING;
@@ -656,7 +656,7 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
        IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
 
        /* Set base references for mode calcs. */
-       if (itv->is_50hz) {
+       if (itv->is_out_50hz) {
                pixclock = 84316;
                hlimit = 776;
                vlimit = 591;
@@ -784,12 +784,12 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
           If the margins are too large, just center the screen
           (enforcing margins causes too many problems) */
 
-       if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1) {
+       if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
                var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
-       }
-       if (var->upper_margin + var->yres > (itv->is_50hz ? 577 : 481)) {
-               var->upper_margin = 1 + (((itv->is_50hz ? 576 : 480) - var->yres) / 2);
-       }
+
+       if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
+               var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
+                       var->yres) / 2);
 
        /* Maintain overall 'size' for a constant refresh rate */
        var->right_margin = hlimit - var->left_margin - var->xres;
@@ -836,7 +836,12 @@ static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *inf
        u32 osd_pan_index;
        struct ivtv *itv = (struct ivtv *) info->par;
 
-       osd_pan_index = (var->xoffset + (var->yoffset * var->xres_virtual))*var->bits_per_pixel/8;
+       if (var->yoffset + info->var.yres > info->var.yres_virtual ||
+           var->xoffset + info->var.xres > info->var.xres_virtual)
+               return -EINVAL;
+
+       osd_pan_index = var->yoffset * info->fix.line_length
+                     + var->xoffset * info->var.bits_per_pixel / 8;
        write_reg(osd_pan_index, 0x02A0C);
 
        /* Pass this info back the yuv handler */
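Editorial note: the pan fix above validates the requested offsets against the virtual framebuffer and computes the byte offset from the real line pitch and pixel depth rather than from xres_virtual. A standalone illustration of that arithmetic, with made-up geometry values:

#include <stdio.h>

struct fbgeom {
        unsigned int xres, yres, xres_virtual, yres_virtual;
        unsigned int line_length, bits_per_pixel;
};

/* Byte offset of the visible origin: rows panned down times the line pitch,
 * plus the x offset scaled by bytes per pixel, after a bounds check. */
static long pan_offset(const struct fbgeom *g,
                       unsigned int xoffset, unsigned int yoffset)
{
        if (yoffset + g->yres > g->yres_virtual ||
            xoffset + g->xres > g->xres_virtual)
                return -1;      /* -EINVAL in the driver */
        return (long)yoffset * g->line_length +
               (long)xoffset * g->bits_per_pixel / 8;
}

int main(void)
{
        struct fbgeom g = { 720, 480, 720, 960, 2880, 32 };

        printf("%ld\n", pan_offset(&g, 0, 480));        /* 1382400 */
        printf("%ld\n", pan_offset(&g, 0, 481));        /* -1: out of range */
        return 0;
}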
@@ -1003,19 +1008,21 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
        /* Hardware coords start at 0, user coords start at 1. */
        osd_left--;
 
-       start_window.left = osd_left >= 0 ? osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
+       start_window.left = osd_left >= 0 ?
+                osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
 
        oi->display_byte_stride =
                        start_window.width * oi->bytes_per_pixel;
 
        /* Vertical size & position */
 
-       max_height = itv->is_50hz ? 576 : 480;
+       max_height = itv->is_out_50hz ? 576 : 480;
 
        if (osd_yres > max_height)
                osd_yres = max_height;
 
-       start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400;
+       start_window.height = osd_yres ?
+               osd_yres : itv->is_out_50hz ? 480 : 400;
 
        /* Check vertical start (osd_upper). */
        if (osd_upper + start_window.height > max_height + 1) {
index 472a69359e609c16c67b014da479e7bbeb2c29ed..c9fd04ee70a8ae766811059c6942657334ac7621 100644 (file)
@@ -391,7 +391,7 @@ static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
        };
        int i;
 
-       dev_dbg(isp->dev, "");
+       dev_dbg(isp->dev, "ISP IRQ: ");
 
        for (i = 0; i < ARRAY_SIZE(name); i++) {
                if ((1 << i) & irqstatus)
index 398864370267f33d12a7fe56a5feaa5ce6c916fa..4e4d4122d9a60339175486b999a8576b25565c09 100644 (file)
@@ -1512,7 +1512,7 @@ static int video_dev_create(struct soc_camera_device *icd)
  */
 static int soc_camera_video_start(struct soc_camera_device *icd)
 {
-       struct device_type *type = icd->vdev->dev.type;
+       const struct device_type *type = icd->vdev->dev.type;
        int ret;
 
        if (!icd->dev.parent)
index ede7852bb1df15b63ab575d968a6fe6862c1c875..c3ab0c813be249d4957fa93e83cfac0df567b703 100644 (file)
@@ -30,7 +30,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
        struct uvc_entity *remote;
        unsigned int i;
        u8 remote_pad;
-       int ret;
+       int ret = 0;
 
        for (i = 0; i < entity->num_pads; ++i) {
                struct media_entity *source;
index 200311fea369e6a952f38b8c205a0fb5c0c01f00..e2a52e5cf449c7f65f97e3e492a8304e321a80df 100644 (file)
@@ -609,6 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
        return ret;
 }
 
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
 static int apds990x_chip_on(struct apds990x_chip *chip)
 {
        int err  = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -624,6 +625,7 @@ static int apds990x_chip_on(struct apds990x_chip *chip)
        apds990x_mode_on(chip);
        return 0;
 }
+#endif
 
 static int apds990x_chip_off(struct apds990x_chip *chip)
 {
index e01e08c8c88b6cb72e3e585fec9ccfacfe79eb2e..bc685bfc4c33aaacf89a5dc7205943bd69f14f8d 100644 (file)
@@ -174,7 +174,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
                timer_nr = t < max ? (int) t : -1;
        } else {
                /* check if the requested timer's available */
-               if (test_bit(timer_nr, mfgpt->avail))
+               if (!test_bit(timer_nr, mfgpt->avail))
                        timer_nr = -1;
        }
 
index 7aded90f9daa1683b7a40fa24befed62b26376da..cfbddbef11de3b67c54be85797dd124843ca8596 100644 (file)
@@ -845,7 +845,7 @@ err_iounmap:
 err_iounmap_app:
        iounmap(config->va_app_base);
 err_kzalloc:
-       kfree(config);
+       kfree(target);
 err_rel_res:
        release_mem_region(res1->start, resource_size(res1));
 err_rel_res0:
index 1a05fe08e2cb7dc4462b76d360115e725321fc7e..f91f82eabda72a311602a8979ab835e0d938ffd6 100644 (file)
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
        pr_debug("%s: done ", __func__);
 }
 
-static unsigned int st_tty_receive(struct tty_struct *tty,
-               const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+                          char *tty_flags, int count)
 {
 #ifdef VERBOSE
        print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
         */
        st_recv(tty->disc_data, data, count);
        pr_debug("done %s", __func__);
-
-       return count;
 }
 
 /* wake-up function called in from the TTY layer
index 5da5bea0f9f09ed49eedf281da1308c04a0b7341..7721de942c69e29cd0d36300eb48a695dfbdafbf 100644 (file)
@@ -1144,9 +1144,17 @@ static int __devinit mmci_probe(struct amba_device *dev,
                else if (ret != -ENOSYS)
                        goto err_gpio_cd;
 
+               /*
+                * A gpio pin that will detect cards when inserted and removed
+                * will most likely want to trigger on the edges if it is
+                * 0 when ejected and 1 when inserted (or mutatis mutandis
+                * for the inverted case) so we request triggers on both
+                * edges.
+                */
                ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
-                                             mmci_cd_irq, 0,
-                                             DRIVER_NAME " (cd)", host);
+                               mmci_cd_irq,
+                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                               DRIVER_NAME " (cd)", host);
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
index 259ece047afcd6057bff748f7850d15430ae2136..5b2e2155b413bc988cabbd5125fe5c4c64f27d5b 100644 (file)
@@ -435,6 +435,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
                reg = regulator_get(host->dev, "vmmc_aux");
                host->vcc_aux = IS_ERR(reg) ? NULL : reg;
 
+               /* For eMMC do not power off when not in sleep state */
+               if (mmc_slot(host).no_regulator_off_init)
+                       return 0;
                /*
                * UGLY HACK:  workaround regulator framework bugs.
                * When the bootloader leaves a supply active, it's
index 5f25889e27efdafee0941407ab0c39c9f7b1ee36..44b28b2d70039e959811a4c9cfa173ed5c6b7b43 100644 (file)
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
 static int nopnp;
 #endif
 
-static int el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
 static void el3_common_remove(struct net_device *dev);
 static ushort id_read_eeprom(int index);
 static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
 static int isa_registered;
 
 #ifdef CONFIG_PNP
-static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
+static struct pnp_device_id el3_pnp_ids[] = {
        { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
        { .id = "TCM5091" }, /* 3Com Etherlink III */
        { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
 #endif /* CONFIG_PNP */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
+static struct eisa_device_id el3_eisa_ids[] = {
                { "TCM5090" },
                { "TCM5091" },
                { "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
 #ifdef CONFIG_MCA
 static int el3_mca_probe(struct device *dev);
 
-static const short el3_mca_adapter_ids[] __devinitconst = {
+static short el3_mca_adapter_ids[] __initdata = {
                0x627c,
                0x627d,
                0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
                0x0000
 };
 
-static const char *const el3_mca_adapter_names[] __devinitconst = {
+static char *el3_mca_adapter_names[] __initdata = {
                "3Com 3c529 EtherLink III (10base2)",
                "3Com 3c529 EtherLink III (10baseT)",
                "3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
 }
 
 #ifdef CONFIG_MCA
-static int __devinit el3_mca_probe(struct device *device)
+static int __init el3_mca_probe(struct device *device)
 {
        /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
         * heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
 #endif /* CONFIG_MCA */
 
 #ifdef CONFIG_EISA
-static int __devinit el3_eisa_probe (struct device *device)
+static int __init el3_eisa_probe (struct device *device)
 {
        short i;
        int ioaddr, irq, if_port;
index 99f43d275442f793e117cc6064d3f894eee6f928..8cc22568ebd3429282b8674b912cb8fe80eb7a03 100644 (file)
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
 #endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
+static struct eisa_device_id vortex_eisa_ids[] = {
        { "TCM5920", CH_3C592 },
        { "TCM5970", CH_3C597 },
        { "" }
 };
 MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
 
-static int __devinit vortex_eisa_probe(struct device *device)
+static int __init vortex_eisa_probe(struct device *device)
 {
        void __iomem *ioaddr;
        struct eisa_device *edev;
index 0c9217f48b72060d77c6ac20eaa592011cb4a009..7b3e23f38913657896b833a73d7c679b1723558a 100644 (file)
@@ -50,7 +50,7 @@ static const char version[] =
 #ifdef __arm__
 static void write_rreg(u_long base, u_int reg, u_int val)
 {
-       __asm__(
+       asm volatile(
        "str%?h %1, [%2]        @ NET_RAP\n\t"
        "str%?h %0, [%2, #-4]   @ NET_RDP"
        :
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
 static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 {
        unsigned short v;
-       __asm__(
+       asm volatile(
        "str%?h %1, [%2]        @ NET_RAP\n\t"
        "ldr%?h %0, [%2, #-4]   @ NET_RDP"
        : "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 
 static inline void write_ireg(u_long base, u_int reg, u_int val)
 {
-       __asm__(
+       asm volatile(
        "str%?h %1, [%2]        @ NET_RAP\n\t"
        "str%?h %0, [%2, #8]    @ NET_IDP"
        :
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
 static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 {
        u_short v;
-       __asm__(
+       asm volatile(
        "str%?h %1, [%2]        @ NAT_RAP\n\t"
        "ldr%?h %0, [%2, #8]    @ NET_IDP\n\t"
        : "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 #define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
 #define am_readword(dev,off)      __raw_readw(ISAMEM_BASE + ((off) << 1))
 
-static inline void
+static void
 am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 {
        offset = ISAMEM_BASE + (offset << 1);
        length = (length + 1) & ~1;
        if ((int)buf & 2) {
-               __asm__ __volatile__("str%?h    %2, [%0], #4"
+               asm volatile("str%?h    %2, [%0], #4"
                 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
                buf += 2;
                length -= 2;
        }
        while (length > 8) {
-               unsigned int tmp, tmp2;
-               __asm__ __volatile__(
-                       "ldm%?ia        %1!, {%2, %3}\n\t"
+               register unsigned int tmp asm("r2"), tmp2 asm("r3");
+               asm volatile(
+                       "ldm%?ia        %0!, {%1, %2}"
+                       : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+               length -= 8;
+               asm volatile(
+                       "str%?h %1, [%0], #4\n\t"
+                       "mov%?  %1, %1, lsr #16\n\t"
+                       "str%?h %1, [%0], #4\n\t"
                        "str%?h %2, [%0], #4\n\t"
                        "mov%?  %2, %2, lsr #16\n\t"
-                       "str%?h %2, [%0], #4\n\t"
-                       "str%?h %3, [%0], #4\n\t"
-                       "mov%?  %3, %3, lsr #16\n\t"
-                       "str%?h %3, [%0], #4"
-               : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
-               : "0" (offset), "1" (buf));
-               length -= 8;
+                       "str%?h %2, [%0], #4"
+               : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
        }
        while (length > 0) {
-               __asm__ __volatile__("str%?h    %2, [%0], #4"
+               asm volatile("str%?h    %2, [%0], #4"
                 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
                buf += 2;
                length -= 2;
        }
 }
 
-static inline void
+static void
 am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 {
        offset = ISAMEM_BASE + (offset << 1);
        length = (length + 1) & ~1;
        if ((int)buf & 2) {
                unsigned int tmp;
-               __asm__ __volatile__(
+               asm volatile(
                        "ldr%?h %2, [%0], #4\n\t"
                        "str%?b %2, [%1], #1\n\t"
                        "mov%?  %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
                length -= 2;
        }
        while (length > 8) {
-               unsigned int tmp, tmp2, tmp3;
-               __asm__ __volatile__(
+               register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+               asm volatile(
                        "ldr%?h %2, [%0], #4\n\t"
+                       "ldr%?h %4, [%0], #4\n\t"
                        "ldr%?h %3, [%0], #4\n\t"
-                       "orr%?  %2, %2, %3, lsl #16\n\t"
-                       "ldr%?h %3, [%0], #4\n\t"
+                       "orr%?  %2, %2, %4, lsl #16\n\t"
                        "ldr%?h %4, [%0], #4\n\t"
                        "orr%?  %3, %3, %4, lsl #16\n\t"
                        "stm%?ia        %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
        }
        while (length > 0) {
                unsigned int tmp;
-               __asm__ __volatile__(
+               asm volatile(
                        "ldr%?h %2, [%0], #4\n\t"
                        "str%?b %2, [%1], #1\n\t"
                        "mov%?  %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
        return errorcount;
 }
 
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+       if (addr[0] & 0x01) {
+               int idx, bit;
+               u32 crc;
+
+               crc = ether_crc_le(ETH_ALEN, addr);
+
+               idx = crc >> 30;
+               bit = (crc >> 26) & 15;
+
+               hash[idx] |= 1 << bit;
+       }
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+       unsigned int mode = MODE_PORT_10BT;
+
+       if (dev->flags & IFF_PROMISC) {
+               mode |= MODE_PROMISC;
+               memset(hash, 0xff, 4 * sizeof(*hash));
+       } else if (dev->flags & IFF_ALLMULTI) {
+               memset(hash, 0xff, 4 * sizeof(*hash));
+       } else {
+               struct netdev_hw_addr *ha;
+
+               memset(hash, 0, 4 * sizeof(*hash));
+
+               netdev_for_each_mc_addr(ha, dev)
+                       am79c961_mc_hash(ha->addr, hash);
+       }
+
+       return mode;
+}
+
 static void
 am79c961_init_for_open(struct net_device *dev)
 {
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
        unsigned long flags;
        unsigned char *p;
        u_int hdr_addr, first_free_addr;
+       u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
        int i;
 
        /*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
        write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
 
        for (i = LADRL; i <= LADRH; i++)
-               write_rreg (dev->base_addr, i, 0);
+               write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
 
        for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
                write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
 
-       i = MODE_PORT_10BT;
-       if (dev->flags & IFF_PROMISC)
-               i |= MODE_PROMISC;
-
-       write_rreg (dev->base_addr, MODE, i);
+       write_rreg (dev->base_addr, MODE, mode);
        write_rreg (dev->base_addr, POLLINT, 0);
        write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
        write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
        return 0;
 }
 
-static void am79c961_mc_hash(char *addr, unsigned short *hash)
-{
-       if (addr[0] & 0x01) {
-               int idx, bit;
-               u32 crc;
-
-               crc = ether_crc_le(ETH_ALEN, addr);
-
-               idx = crc >> 30;
-               bit = (crc >> 26) & 15;
-
-               hash[idx] |= 1 << bit;
-       }
-}
-
 /*
  * Set or clear promiscuous/multicast mode filter for this adapter.
  */
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
 {
        struct dev_priv *priv = netdev_priv(dev);
        unsigned long flags;
-       unsigned short multi_hash[4], mode;
+       u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
        int i, stopped;
 
-       mode = MODE_PORT_10BT;
-
-       if (dev->flags & IFF_PROMISC) {
-               mode |= MODE_PROMISC;
-       } else if (dev->flags & IFF_ALLMULTI) {
-               memset(multi_hash, 0xff, sizeof(multi_hash));
-       } else {
-               struct netdev_hw_addr *ha;
-
-               memset(multi_hash, 0x00, sizeof(multi_hash));
-
-               netdev_for_each_mc_addr(ha, dev)
-                       am79c961_mc_hash(ha->addr, multi_hash);
-       }
-
        spin_lock_irqsave(&priv->chip_lock, flags);
 
        stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
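
The consolidated am79c961_get_rx_mode()/am79c961_mc_hash() pair above picks one of 64 bits in the LANCE logical-address filter from the top six bits of the little-endian Ethernet CRC of each multicast address (two bits select the 16-bit LADR register, four bits select the bit within it). A stand-alone user-space sketch of that derivation, with a local reimplementation standing in for the kernel's ether_crc_le() (names here are illustrative, not from the driver):

#include <stdio.h>

/* Little-endian (bit-reversed) CRC-32, equivalent to ether_crc_le(). */
static unsigned int ether_crc_le_sketch(int length, const unsigned char *data)
{
	unsigned int crc = 0xffffffff;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 8; --bit >= 0; octet >>= 1) {
			if ((crc ^ octet) & 1) {
				crc >>= 1;
				crc ^= 0xedb88320;
			} else {
				crc >>= 1;
			}
		}
	}
	return crc;
}

int main(void)
{
	/* IPv4 all-hosts multicast group MAC, used only as sample input. */
	unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned short hash[4] = { 0, 0, 0, 0 };
	unsigned int crc = ether_crc_le_sketch(6, addr);
	int idx = crc >> 30;		/* which 16-bit LADR register */
	int bit = (crc >> 26) & 15;	/* which bit within that register */

	hash[idx] |= 1 << bit;
	printf("crc=%08x -> LADR%d bit %d (hash: %04x %04x %04x %04x)\n",
	       crc, idx, bit, hash[0], hash[1], hash[2], hash[3]);
	return 0;
}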
index 5a77001b6d1053d4898b4635b0f1b9d1357e5868..0b46b8ea0e8006fa1456125752a006cd0e9dba8c 100644 (file)
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
                skb = dev_alloc_skb(length + 2);
                if (likely(skb != NULL)) {
+                       struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
                        skb_reserve(skb, 2);
-                       dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+                       dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
                                                length, DMA_FROM_DEVICE);
                        skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+                       dma_sync_single_for_device(dev->dev.parent,
+                                                  rxd->buf_addr, length,
+                                                  DMA_FROM_DEVICE);
                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, dev);
 
@@ -348,6 +352,7 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ep93xx_priv *ep = netdev_priv(dev);
+       struct ep93xx_tdesc *txd;
        int entry;
 
        if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
        entry = ep->tx_pointer;
        ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-       ep->descs->tdesc[entry].tdesc1 =
-               TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+       txd = &ep->descs->tdesc[entry];
+
+       txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+       dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+                               DMA_TO_DEVICE);
        skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-       dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
-                               skb->len, DMA_TO_DEVICE);
+       dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+                                  DMA_TO_DEVICE);
        dev_kfree_skb(skb);
 
        spin_lock_irq(&ep->tx_pending_lock);
@@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+       struct device *dev = ep->dev->dev.parent;
        int i;
 
-       for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
                dma_addr_t d;
 
                d = ep->descs->rdesc[i].buf_addr;
                if (d)
-                       dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+                       dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
                if (ep->rx_buf[i] != NULL)
-                       free_page((unsigned long)ep->rx_buf[i]);
+                       kfree(ep->rx_buf[i]);
        }
 
-       for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+       for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
                dma_addr_t d;
 
                d = ep->descs->tdesc[i].buf_addr;
                if (d)
-                       dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+                       dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
                if (ep->tx_buf[i] != NULL)
-                       free_page((unsigned long)ep->tx_buf[i]);
+                       kfree(ep->tx_buf[i]);
        }
 
-       dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+       dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
                                                        ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+       struct device *dev = ep->dev->dev.parent;
        int i;
 
-       ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
-                               &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+       ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+                               &ep->descs_dma_addr, GFP_KERNEL);
        if (ep->descs == NULL)
                return 1;
 
-       for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-               void *page;
+       for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+               void *buf;
                dma_addr_t d;
 
-               page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-               if (page == NULL)
+               buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+               if (buf == NULL)
                        goto err;
 
-               d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(NULL, d)) {
-                       free_page((unsigned long)page);
+               d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(dev, d)) {
+                       kfree(buf);
                        goto err;
                }
 
-               ep->rx_buf[i] = page;
+               ep->rx_buf[i] = buf;
                ep->descs->rdesc[i].buf_addr = d;
                ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-               ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-               ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-               ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
        }
 
-       for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-               void *page;
+       for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+               void *buf;
                dma_addr_t d;
 
-               page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-               if (page == NULL)
+               buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+               if (buf == NULL)
                        goto err;
 
-               d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-               if (dma_mapping_error(NULL, d)) {
-                       free_page((unsigned long)page);
+               d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, d)) {
+                       kfree(buf);
                        goto err;
                }
 
-               ep->tx_buf[i] = page;
+               ep->tx_buf[i] = buf;
                ep->descs->tdesc[i].buf_addr = d;
-
-               ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-               ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
        }
 
        return 0;
@@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
        }
        ep = netdev_priv(dev);
        ep->dev = dev;
+       SET_NETDEV_DEV(dev, &pdev->dev);
        netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
        platform_set_drvdata(pdev, dev);
index 17b4dd94da907f388e66e9e5bd6ec6d5daf50148..652b30e525d01d8c4b547465bb66f943929fd4ea 100644 (file)
@@ -388,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
+#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -400,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
        skb->priority = 1;
+
+       skb->queue_mapping = bond_queue_mapping(skb);
+
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
        else
@@ -4206,6 +4211,7 @@ static inline int bond_slave_override(struct bonding *bond,
        return res;
 }
 
+
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        /*
@@ -4216,6 +4222,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
         */
        u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
 
+       /*
+        * Save the original txq to restore before passing to the driver
+        */
+       bond_queue_mapping(skb) = skb->queue_mapping;
+
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
                        txq -= dev->real_num_tx_queues;
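
The bond_queue_mapping() macro above stashes the queue id seen in bond_select_queue() in the first two bytes of skb->cb, so bond_dev_queue_xmit() can restore it before handing the skb to the slave driver. A minimal stand-alone model of that save/restore pattern; the struct below is a toy stand-in, not the kernel's struct sk_buff:

#include <stdio.h>

struct toy_skb {
	char cb[48];			/* per-layer scratch space */
	unsigned short queue_mapping;	/* tx queue the stack selected */
};

/* Reuse the control block to remember the original mapping. */
#define toy_queue_mapping(skb) (*(unsigned short *)((skb)->cb))

static unsigned short toy_select_queue(struct toy_skb *skb)
{
	toy_queue_mapping(skb) = skb->queue_mapping;	/* save */
	return 0;					/* bond itself transmits on queue 0 */
}

static void toy_queue_xmit(struct toy_skb *skb)
{
	skb->queue_mapping = toy_queue_mapping(skb);	/* restore for the slave */
	printf("xmit on slave queue %u\n", skb->queue_mapping);
}

int main(void)
{
	struct toy_skb skb = { .queue_mapping = 3 };

	skb.queue_mapping = toy_select_queue(&skb);
	toy_queue_xmit(&skb);	/* prints "xmit on slave queue 3" */
	return 0;
}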
index 73c7e03617ecc52313af4335f8b245d46b7eecb4..3df0c0f8b8bf93ca98a8829e7c09162a70f384c4 100644 (file)
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
 
 #endif
 
-static unsigned int ldisc_receive(struct tty_struct *tty,
-               const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+                       char *flags, int count)
 {
        struct sk_buff *skb = NULL;
        struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
        } else
                ++ser->dev->stats.rx_dropped;
        update_tty_status(ser);
-
-       return count;
 }
 
 static int handle_tx(struct ser_device *ser)
index d4990568baee071cd5ceaaac9a91c6bc5d6cf7c9..17678117ed69dba42d02339ec76b962bf25f1695 100644 (file)
@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
        mem_size = resource_size(mem);
        if (!request_mem_region(mem->start, mem_size, pdev->name)) {
                err = -EBUSY;
-               goto failed_req;
+               goto failed_get;
        }
 
        base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
        iounmap(base);
  failed_map:
        release_mem_region(mem->start, mem_size);
- failed_req:
-       clk_put(clk);
  failed_get:
+       clk_put(clk);
  failed_clock:
        return err;
 }
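
The flexcan hunk above is purely about error-path ordering: every failure must jump to a label that releases exactly what has already been acquired, with the labels unwinding in reverse order of acquisition (here, clk_put() has to run on the paths where the clock was obtained). A generic stand-alone illustration of that goto-unwind convention, with made-up resources in place of the driver's clock, memory region and mapping:

#include <stdio.h>
#include <stdlib.h>

static int probe_sketch(void)
{
	char *clk, *mem, *irqbuf;
	int err;

	clk = malloc(16);		/* 1st resource */
	if (!clk) {
		err = -1;
		goto failed_clock;
	}

	mem = malloc(32);		/* 2nd resource */
	if (!mem) {
		err = -2;
		goto failed_get;	/* only clk is held so far */
	}

	irqbuf = malloc(64);		/* 3rd resource */
	if (!irqbuf) {
		err = -3;
		goto failed_map;	/* clk and mem are held */
	}

	free(irqbuf);
	free(mem);
	free(clk);
	return 0;

 failed_map:
	free(mem);
 failed_get:
	free(clk);
 failed_clock:
	return err;
}

int main(void)
{
	return probe_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}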
index 75622d54581f15f87ea3ca4b893e7f37cbf34e9e..1b49df6b2470874b0cb76cf45214f247c6d3bf29 100644 (file)
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
  * in parallel
  */
 
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
                              const unsigned char *cp, char *fp, int count)
 {
        struct slcan *sl = (struct slcan *) tty->disc_data;
-       int bytes = count;
 
        if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
-               return -ENODEV;
+               return;
 
        /* Read the characters out of the buffer */
-       while (bytes--) {
+       while (count--) {
                if (fp && *fp++) {
                        if (!test_and_set_bit(SLF_ERROR, &sl->flags))
                                sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
                }
                slcan_unesc(sl, *cp++);
        }
-
-       return count;
 }
 
 /************************************
index 29a4f06fbfcf0004c53e96d51cc5d6da0aaaf76b..dcc4a170b0f397ed5362341a8d5a62fb07d411a4 100644 (file)
@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        ndev = alloc_etherdev(sizeof(struct emac_priv));
        if (!ndev) {
                dev_err(&pdev->dev, "error allocating net_device\n");
-               clk_put(emac_clk);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto free_clk;
        }
 
        platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data\n");
-               return -ENODEV;
+               rc = -ENODEV;
+               goto probe_quit;
        }
 
        /* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
        iounmap(priv->remap_addr);
 
 probe_quit:
-       clk_put(emac_clk);
        free_netdev(ndev);
+free_clk:
+       clk_put(emac_clk);
        return rc;
 }
 
index 17654059922de988574f7c888d7367164265fda8..8b0084d17c8c82f1a6c30a273614a0689999661c 100644 (file)
@@ -331,18 +331,18 @@ static struct {
                          "DE422",\
                          ""}
 
-static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
 
 enum depca_type {
        DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
 };
 
-static const char depca_string[] = "depca";
+static char depca_string[] = "depca";
 
 static int depca_device_remove (struct device *device);
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
+static struct eisa_device_id depca_eisa_ids[] = {
        { "DEC4220", de422 },
        { "" }
 };
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
 #define DE210_ID 0x628d
 #define DE212_ID 0x6def
 
-static const short depca_mca_adapter_ids[] __devinitconst = {
+static short depca_mca_adapter_ids[] = {
        DE210_ID,
        DE212_ID,
        0x0000
 };
 
-static const char *depca_mca_adapter_name[] = {
+static char *depca_mca_adapter_name[] = {
        "DEC EtherWORKS MC Adapter (DE210)",
        "DEC EtherWORKS MC Adapter (DE212)",
        NULL
 };
 
-static const enum depca_type depca_mca_adapter_type[] = {
+static enum depca_type depca_mca_adapter_type[] = {
        de210,
        de212,
        0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
 static int load_packet(struct net_device *dev, struct sk_buff *skb);
 static void depca_dbg_open(struct net_device *dev);
 
-static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
-static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
-static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
 
 static int irq;
 static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
 {
        struct depca_private *lp;
        int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
        if (dev->irq < 2) {
                unsigned char irqnum;
                unsigned long irq_mask, delay;
-               const u_char *depca_irq;
 
                irq_mask = probe_irq_on();
 
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
                        break;
 
                default:
-                       depca_irq = NULL;
                        break;  /* Not reached */
                }
 
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
        }
 }
 
-static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
 {
        int status = 0;
 
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
 /*
 ** Microchannel bus I/O device probe
 */
-static int __devinit depca_mca_probe(struct device *device)
+static int __init depca_mca_probe(struct device *device)
 {
        unsigned char pos[2];
        unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
 ** ISA bus I/O device probe
 */
 
-static void __devinit depca_platform_probe (void)
+static void __init depca_platform_probe (void)
 {
        int i;
        struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
        }
 }
 
-static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
 {
        u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
        enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
 */
 
 #ifdef CONFIG_EISA
-static int __devinit depca_eisa_probe (struct device *device)
+static int __init depca_eisa_probe (struct device *device)
 {
        enum depca_type adapter = unknown;
        struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
 ** and Boot (readb) ROM. This will also give us a clue to the network RAM
 ** base address.
 */
-static int __devinit DepcaSignature(char *name, u_long base_addr)
+static int __init DepcaSignature(char *name, u_long base_addr)
 {
        u_int i, j, k;
        void __iomem *ptr;
index c445457b66d53df856564a2d88895680249f49bf..23179dbcedd260ccd6b401f21c859bbdeb753675 100644 (file)
@@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
        if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {  /* D-Link Only */
                /* Check CRC */
                crc = ~ether_crc_le (256 - 4, sromdata);
-               if (psrom->crc != crc) {
+               if (psrom->crc != cpu_to_le32(crc)) {
                        printk (KERN_ERR "%s: EEPROM data CRC error.\n",
                                        dev->name);
                        return -1;
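
The dl2k change above is an endianness fix: the CRC field in the EEPROM is stored little-endian, so the CPU-order value computed from ~ether_crc_le() has to be converted with cpu_to_le32() before the comparison, or the check only passes on little-endian hosts. A hedged user-space analogue of the same idea, reading the stored field portably instead of converting the computed one (the values below are made up):

#include <stdio.h>
#include <stdint.h>

/* Portable read of a 32-bit little-endian field from a byte buffer,
 * the user-space analogue of le32_to_cpu() on a __le32 member. */
static uint32_t get_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Pretend these are the last 4 bytes of the EEPROM image,
	 * holding the CRC 0x12345678 in little-endian byte order. */
	unsigned char stored[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t computed = 0x12345678;	/* stand-in for the computed CRC */

	if (get_le32(stored) != computed)
		printf("EEPROM data CRC error.\n");
	else
		printf("CRC ok.\n");
	return 0;
}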
index fbaff3584bd4982e5c3f72c6079d86f6d12b5f22..ee597e676ee500235f7dace13c570ad92e20724f 100644 (file)
@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
 
        irqflags |= IRQF_SHARED;
 
-       if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
-               return -EAGAIN;
-
        /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
        iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
        mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
        dm9000_reset(db);
        dm9000_init_dm9000(dev);
 
+       if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+               return -EAGAIN;
+
        /* Init driver variable */
        db->dbug_cnt = 0;
 
index ff60b23a5b7429b99fc06e5c138a643d5b804dd3..2dfcc8047847b12ade17da7c324a717133398680 100644 (file)
@@ -10,7 +10,7 @@
  * Maintainer: Kumar Gala
  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
  * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -476,9 +476,6 @@ static const struct net_device_ops gfar_netdev_ops = {
 #endif
 };
 
-unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
-
 void lock_rx_qs(struct gfar_private *priv)
 {
        int i = 0x0;
@@ -868,28 +865,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
 
        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
-       ftp_rqfpr[rqfar] = rqfpr;
-       ftp_rqfcr[rqfar] = rqfcr;
+       priv->ftp_rqfpr[rqfar] = rqfpr;
+       priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 
        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
-       ftp_rqfpr[rqfar] = rqfpr;
-       ftp_rqfcr[rqfar] = rqfcr;
+       priv->ftp_rqfpr[rqfar] = rqfpr;
+       priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 
        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
-       ftp_rqfcr[rqfar] = rqfcr;
-       ftp_rqfpr[rqfar] = rqfpr;
+       priv->ftp_rqfcr[rqfar] = rqfcr;
+       priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 
        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
-       ftp_rqfcr[rqfar] = rqfcr;
-       ftp_rqfpr[rqfar] = rqfpr;
+       priv->ftp_rqfcr[rqfar] = rqfcr;
+       priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 
        return rqfar;
@@ -904,8 +901,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
 
        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
-       ftp_rqfcr[rqfar] = rqfcr;
-       ftp_rqfpr[rqfar] = rqfpr;
+       priv->ftp_rqfcr[rqfar] = rqfcr;
+       priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
 
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@@ -921,8 +918,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
-               ftp_rqfcr[i] = rqfcr;
-               ftp_rqfpr[i] = rqfpr;
+               priv->ftp_rqfcr[i] = rqfcr;
+               priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
 }
index fc86f51954456e5ddcec1c6ba2e40f17e612bc01..ba36dc7a34356c0fac622cbd2de682201049d5eb 100644 (file)
@@ -9,7 +9,7 @@
  * Maintainer: Kumar Gala
  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -1107,10 +1107,12 @@ struct gfar_private {
        /* HW time stamping enabled flag */
        int hwts_rx_en;
        int hwts_tx_en;
+
+       /*Filer table*/
+       unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+       unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
 };
 
-extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
-extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
 
 static inline int gfar_has_errata(struct gfar_private *priv,
                                  enum gfar_errata err)
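
The gianfar hunks move the shadow filer tables from module-global arrays into struct gfar_private, so each eTSEC instance keeps its own copy instead of all devices sharing one. A minimal sketch of the resulting per-device shape, using toy types rather than the driver's:

#include <stdio.h>

#define MAX_FILER_IDX 255

/* After the change the shadow table lives in the per-device private
 * struct, so two instances no longer overwrite each other's entries. */
struct gfar_priv_sketch {
	unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
	unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
};

int main(void)
{
	struct gfar_priv_sketch eth0 = { { 0 } }, eth1 = { { 0 } };

	eth0.ftp_rqfcr[0] = 0x1;	/* independent per-device state */
	eth1.ftp_rqfcr[0] = 0x2;
	printf("eth0=%#x eth1=%#x\n", eth0.ftp_rqfcr[0], eth1.ftp_rqfcr[0]);
	return 0;
}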
index 493d743839d977f035f36be50de59d2e5744b2e6..239e3330495fb4fc9b96063d8fbf2438e429a6df 100644 (file)
@@ -9,7 +9,7 @@
  *  Maintainer: Kumar Gala
  *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- *  Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
+ *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
  *
  *  This software may be used and distributed according to
  *  the terms of the GNU Public License, Version 2, incorporated herein
@@ -609,15 +609,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        if (ethflow & RXH_L2DA) {
                fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
                        RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 
                fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
                                RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -626,16 +626,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                                RQFCR_AND | RQFCR_HASHTBL_0;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
 
        if (ethflow & RXH_IP_SRC) {
                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -643,8 +643,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        if (ethflow & (RXH_IP_DST)) {
                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -652,8 +652,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        if (ethflow & RXH_L3_PROTO) {
                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -661,8 +661,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        if (ethflow & RXH_L4_B_0_1) {
                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -670,8 +670,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
        if (ethflow & RXH_L4_B_2_3) {
                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
-               ftp_rqfpr[priv->cur_filer_idx] = fpr;
-               ftp_rqfcr[priv->cur_filer_idx] = fcr;
+               priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+               priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
@@ -705,12 +705,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
        }
 
        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
-               local_rqfpr[j] = ftp_rqfpr[i];
-               local_rqfcr[j] = ftp_rqfcr[i];
+               local_rqfpr[j] = priv->ftp_rqfpr[i];
+               local_rqfcr[j] = priv->ftp_rqfcr[i];
                j--;
-               if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+               if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
                        RQFCR_CLE |RQFCR_AND)) &&
-                       (ftp_rqfpr[i] == cmp_rqfpr))
+                       (priv->ftp_rqfpr[i] == cmp_rqfpr))
                        break;
        }
 
@@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
         * if it was already programmed, we need to overwrite these rules
         */
        for (l = i+1; l < MAX_FILER_IDX; l++) {
-               if ((ftp_rqfcr[l] & RQFCR_CLE) &&
-                       !(ftp_rqfcr[l] & RQFCR_AND)) {
-                       ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+               if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+                       !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+                       priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
                                RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
-                       ftp_rqfpr[l] = FPR_FILER_MASK;
-                       gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+                       priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+                       gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+                               priv->ftp_rqfpr[l]);
                        break;
                }
 
-               if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+               if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+                       (priv->ftp_rqfcr[l] & RQFCR_AND))
                        continue;
                else {
-                       local_rqfpr[j] = ftp_rqfpr[l];
-                       local_rqfcr[j] = ftp_rqfcr[l];
+                       local_rqfpr[j] = priv->ftp_rqfpr[l];
+                       local_rqfcr[j] = priv->ftp_rqfcr[l];
                        j--;
                }
        }
@@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
 
        /* Write back the popped out rules again */
        for (k = j+1; k < MAX_FILER_IDX; k++) {
-               ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
-               ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+               priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+               priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
                gfar_write_filer(priv, priv->cur_filer_idx,
                                local_rqfcr[k], local_rqfpr[k]);
                if (!priv->cur_filer_idx)
index 992089639ea4824404f42d53e5362eab2c2fb586..3e5d0b6b6516133039192fa5dfd1fdf88660d81b 100644 (file)
@@ -456,7 +456,7 @@ out:
  * a block of 6pack data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
        const unsigned char *cp, char *fp, int count)
 {
        struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
        int count1;
 
        if (!count)
-               return 0;
+               return;
 
        sp = sp_get(tty);
        if (!sp)
-               return -ENODEV;
+               return;
 
        memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
 
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
 
        sp_put(sp);
        tty_unthrottle(tty);
-
-       return count1;
 }
 
 /*
index 0e4f23531140c8ca6f938447f145351c39b93a37..4c628393c8b157cbc09de52d902b5fa8c3d370a3 100644 (file)
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
  * a block of data has been received, which can now be decapsulated
  * and sent on to the AX.25 layer for further processing.
  */
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+       char *fp, int count)
 {
        struct mkiss *ax = mkiss_get(tty);
-       int bytes = count;
 
        if (!ax)
-               return -ENODEV;
+               return;
 
        /*
         * Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
                ax_changedmtu(ax);
 
        /* Read the characters out of the buffer */
-       while (bytes--) {
+       while (count--) {
                if (fp != NULL && *fp++) {
                        if (!test_and_set_bit(AXF_ERROR, &ax->flags))
                                ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
 
        mkiss_put(ax);
        tty_unthrottle(tty);
-
-       return count;
 }
 
 /*
index c52a1df5d922b0e0e22bced1d0d0d046003acddc..8e10d2f6a5adc8340d239c729e835fb0122b3acd 100644 (file)
@@ -188,14 +188,14 @@ struct hp100_private {
  *  variables
  */
 #ifdef CONFIG_ISA
-static const char *const hp100_isa_tbl[] __devinitconst = {
+static const char *hp100_isa_tbl[] = {
        "HWPF150", /* HP J2573 rev A */
        "HWP1950", /* HP J2573 */
 };
 #endif
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
+static struct eisa_device_id hp100_eisa_tbl[] = {
        { "HWPF180" }, /* HP J2577 rev A */
        { "HWP1920" }, /* HP 27248B */
        { "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
 }
 
 #ifdef CONFIG_ISA
-static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
 {
        const char *sig;
        int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
  * EISA and PCI are handled by device infrastructure.
  */
 
-static int  __devinit hp100_isa_probe(struct net_device *dev, int addr)
+static int  __init hp100_isa_probe(struct net_device *dev, int addr)
 {
        int err = -ENODEV;
 
@@ -396,7 +396,7 @@ static int  __devinit hp100_isa_probe(struct net_device *dev, int addr)
 #endif /* CONFIG_ISA */
 
 #if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __devinit hp100_probe(int unit)
+struct net_device * __init hp100_probe(int unit)
 {
        struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
        int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
 }
 
 #ifdef CONFIG_EISA
-static int __devinit hp100_eisa_probe (struct device *gendev)
+static int __init hp100_eisa_probe (struct device *gendev)
 {
        struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
        struct eisa_device *edev = to_eisa_device(gendev);
index 136d7544cc33331c735684b56b44cffb27ef8b94..a7d6cad3295368a85f5720965746930141f51ac6 100644 (file)
@@ -895,12 +895,12 @@ static int ibmlana_irq;
 static int ibmlana_io;
 static int startslot;          /* counts through slots when probing multiple devices */
 
-static const short ibmlana_adapter_ids[] __devinitconst = {
+static short ibmlana_adapter_ids[] __initdata = {
        IBM_LANA_ID,
        0x0000
 };
 
-static const char *const ibmlana_adapter_names[] __devinitconst = {
+static char *ibmlana_adapter_names[] __devinitdata = {
        "IBM LAN Adapter/A",
        NULL
 };
index 18fccf913635e671fbca0d321c73aa4c484e2f42..2c28621eb30b5aa02e0e7fe6d214f5e4c0b60f72 100644 (file)
@@ -2373,6 +2373,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
        }
 #endif /* CONFIG_PCI_IOV */
        adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+       /* i350 cannot do RSS and SR-IOV at the same time */
+       if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
+               adapter->rss_queues = 1;
 
        /*
         * if rss_queues > 4 or vfs are going to be allocated with rss_queues
index 035861d8acb157f47d080b074c1de9a73a2988e1..3352b2443e58eb1ca0a45856c628850c08092be6 100644 (file)
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
  * usbserial:  urb-complete-interrupt / softint
  */
 
-static unsigned int irtty_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+                             char *fp, int count) 
 {
        struct sir_dev *dev;
        struct sirtty_cb *priv = tty->disc_data;
        int     i;
 
-       IRDA_ASSERT(priv != NULL, return -ENODEV;);
-       IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
+       IRDA_ASSERT(priv != NULL, return;);
+       IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
 
        if (unlikely(count==0))         /* yes, this happens */
-               return 0;
+               return;
 
        dev = priv->dev;
        if (!dev) {
                IRDA_WARNING("%s(), not ready yet!\n", __func__);
-               return -ENODEV;
+               return;
        }
 
        for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
                if (fp && *fp++) { 
                        IRDA_DEBUG(0, "Framing or parity error!\n");
                        sirdev_receive(dev, NULL, 0);   /* notify sir_dev (updating stats) */
-                       return -EINVAL;
+                       return;
                }
        }
 
        sirdev_receive(dev, cp, count);
-
-       return count;
 }
 
 /*
index 69b5707db369e3dfdcbde140b5aa69e60803e47b..8800e1fe4129a84a41b2a6d5da495b5e807b8301 100644 (file)
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
 static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
 
 /* Probing */
-static int smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_fdc(unsigned short cfg_base);
-static int smsc_superio_lpc(unsigned short cfg_base);
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
 #ifdef CONFIG_PCI
-static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void preconfigure_ali_port(struct pci_dev *dev,
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
                                         unsigned short port);
-static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
                                                    unsigned short ircc_fir,
                                                    unsigned short ircc_sir,
                                                    unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
 }
 
 /* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
        { .id = "SMCf010", .driver_data = 0 },
        /* and presumably others */
        { }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
  *    Try to open driver instance
  *
  */
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
 {
        struct smsc_ircc_cb *self;
        struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
 }
 
 
-static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
 {
        IRDA_DEBUG(1, "%s\n", __func__);
 
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
        return inb(cfg_base) != reg ? -1 : 0;
 }
 
-static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
 {
        u8 devid, xdevid, rev;
 
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
 #ifdef CONFIG_PCI
 #define PCIID_VENDOR_INTEL 0x8086
 #define PCIID_VENDOR_ALI 0x10b9
-static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
        /*
         * Subsystems needing entries:
         * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
  * (FIR port, SIR port, FIR DMA, FIR IRQ)
  * through the chip configuration port.
  */
-static int __devinit preconfigure_smsc_chip(struct
+static int __init preconfigure_smsc_chip(struct
                                         smsc_ircc_subsystem_configuration
                                         *conf)
 {
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
  * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
  * They all work the same way!
  */
-static int __devinit preconfigure_through_82801(struct pci_dev *dev,
+static int __init preconfigure_through_82801(struct pci_dev *dev,
                                             struct
                                             smsc_ircc_subsystem_configuration
                                             *conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
  * This is based on reverse-engineering since ALi does not
  * provide any data sheet for the 1533 chip.
  */
-static void __devinit preconfigure_ali_port(struct pci_dev *dev,
+static void __init preconfigure_ali_port(struct pci_dev *dev,
                                         unsigned short port)
 {
        unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
        IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
 }
 
-static int __devinit preconfigure_through_ali(struct pci_dev *dev,
+static int __init preconfigure_through_ali(struct pci_dev *dev,
                                           struct
                                           smsc_ircc_subsystem_configuration
                                           *conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
        return preconfigure_smsc_chip(conf);
 }
 
-static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
                                                    unsigned short ircc_fir,
                                                    unsigned short ircc_sir,
                                                    unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
        int ret = 0;
 
        for_each_pci_dev(dev) {
-               const struct smsc_ircc_subsystem_configuration *conf;
+               struct smsc_ircc_subsystem_configuration *conf;
 
                /*
                 * Cache the subsystem vendor/device:
index 4d40626b3bfa989b7051a7ea5872faa06ba61601..fc12ac0d9f2e55012d202a44acfca9e8d432c6bd 100644 (file)
@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
 
        /* check the status */
        if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-               struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+               struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
 
                if (skb) {
 
index e8984b0ca52104c08e9bddf4eafeec8630a5d167..243ed2aee88e35e801a2c6582f8a80d4e287ba7b 100644 (file)
@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
 
 #define NE3210_DEBUG   0x0
 
-static const unsigned char irq_map[] __devinitconst =
-       { 15, 12, 11, 10, 9, 7, 5, 3 };
-static const unsigned int shmem_map[] __devinitconst =
-       { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
-static const char *const ifmap[] __devinitconst =
-       { "UTP", "?", "BNC", "AUI" };
-static const int ifmap_val[] __devinitconst = {
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
                IF_PORT_10BASET,
                IF_PORT_UNKNOWN,
                IF_PORT_10BASE2,
                IF_PORT_AUI,
 };
 
-static int __devinit ne3210_eisa_probe (struct device *device)
+static int __init ne3210_eisa_probe (struct device *device)
 {
        unsigned long ioaddr, phys_mem;
        int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
        memcpy_toio(shmem, buf, count);
 }
 
-static const struct eisa_device_id ne3210_ids[] __devinitconst = {
+static struct eisa_device_id ne3210_ids[] = {
        { "EGL0101" },
        { "NVL1801" },
        { "" },
index 53872d7d738219968a0f433beb0ac128bdb67d6d..a1b82c9c67d246c64f41ee1ec51f83b04fe0a6fa 100644 (file)
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 }
 
 /* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                  char *cflags, int count)
 {
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
        unsigned long flags;
 
        if (!ap)
-               return -ENODEV;
+               return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        tty_unthrottle(tty);
-
-       return count;
 }
 
 static void
index 0815790a5cf9930c017f490ae5d52e1d3ea67ecb..2573f525f11c31ced737e750fefc66807bbbfc65 100644 (file)
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 }
 
 /* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
 ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
                  char *cflags, int count)
 {
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
        unsigned long flags;
 
        if (!ap)
-               return -ENODEV;
+               return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_sync_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
                tasklet_schedule(&ap->tsk);
        sp_put(ap);
        tty_unthrottle(tty);
-
-       return count;
 }
 
 static void
index e9656616f2a256cfe16adde971ce53f63e0d98ab..a5d9fbf9d816a325d91adb79007a617922d7bf48 100644 (file)
@@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
 
        for (loop = 0; loop < que->no_ops; loop++) {
                QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
+               addr = que->read_addr;
                for (i = 0; i < cnt; i++) {
                        QLCNIC_RD_DUMP_REG(addr, base, &data);
                        *buffer++ = cpu_to_le32(data);
index 3ab7d2c7baf20f2fb3ae3407b7eda7f113d6136a..0f6af5c61a7ca98b2cf5e0a5a44e5ff41eb9360a 100644 (file)
@@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
 
        nf = &pbuf->frag_array[0];
        pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+       pbuf->skb = NULL;
 }
 
 static inline void
index 584809c656d5aaecda43d638fcaba1f48c929c89..8ec1a9a0bb9ae007c69865b2599f07d3f23c99c8 100644 (file)
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
  * in parallel
  */
 
-static unsigned int slip_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+                                                       char *fp, int count)
 {
        struct slip *sl = tty->disc_data;
-       int bytes = count;
 
        if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
-               return -ENODEV;
+               return;
 
        /* Read the characters out of the buffer */
-       while (bytes--) {
+       while (count--) {
                if (fp && *fp++) {
                        if (!test_and_set_bit(SLF_ERROR, &sl->flags))
                                sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
 #endif
                        slip_unesc(sl, *cp++);
        }
-
-       return count;
 }
 
 /************************************
index 0f29f261fcfeb9b7658bbdb862b6676e1d1b5cb0..d07c39cb4daf59ae1e765b927d5156e816b80a9b 100644 (file)
@@ -156,7 +156,7 @@ static const struct {
    { 14, 15 }
 };
 
-static const short smc_mca_adapter_ids[] __devinitconst = {
+static short smc_mca_adapter_ids[] __initdata = {
        0x61c8,
        0x61c9,
        0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
        0x0000
 };
 
-static const char *const smc_mca_adapter_names[] __devinitconst = {
+static char *smc_mca_adapter_names[] __initdata = {
        "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
        "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
        "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
 #endif
 };
 
-static int __devinit ultramca_probe(struct device *gen_dev)
+static int __init ultramca_probe(struct device *gen_dev)
 {
        unsigned short ioaddr;
        struct net_device *dev;
index dc4805f473e33ee439ea00b884b4ab656b0b0c7b..f6285748bd3c0bd82bd8a6cba684527378ad2874 100644 (file)
@@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
        { .compatible = "smsc,lan91c94", },
        { .compatible = "smsc,lan91c111", },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, smc91x_match);
+#else
+#define smc91x_match NULL
 #endif
 
 static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
                .name   = CARDNAME,
                .owner  = THIS_MODULE,
                .pm     = &smc_drv_pm_ops,
-#ifdef CONFIG_OF
                .of_match_table = smc91x_match,
-#endif
        },
 };
 
index f4b01c638a330d969ab32f88178bc51ab7eebf73..a1f9f9eef37d3b04b8d1344c964eec0a57c56eb2 100644 (file)
@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
-       for (i = 0; i <= last; i++) {
+       for (i = 0; i < last; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                entry = NEXT_TX(entry);
index 1313aa1315f01bcc241dbb41276831d2931b005d..2bedc0ace812ac2b5bfbf4ec05a54acc7f7c1ed5 100644 (file)
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
        return 0;
 }
 
-static const short madgemc_adapter_ids[] __devinitconst = {
+static short madgemc_adapter_ids[] __initdata = {
        0x002d,
        0x0000
 };
index 45144d5bd11b26f6f86bd5db3e127239c2b0675c..efaa1d69b72082b5a0729710d0e86647b96bcc1e 100644 (file)
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
 
 static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
 
-static int __devinit de4x5_eisa_probe (struct device *gendev)
+static int __init de4x5_eisa_probe (struct device *gendev)
 {
        struct eisa_device *edev;
        u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
        return 0;
 }
 
-static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
+static struct eisa_device_id de4x5_eisa_ids[] = {
         { "DEC4250", 0 },      /* 0 is the board name index... */
         { "" }
 };
index d7221c4a5dcf97973b97ee50d93aec593a90b434..8056f8a27c6a0695f660648875dfeb0f60e7ee7e 100644 (file)
@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
        if (!q->dir && q->buf && q->len)
                memcpy(catc->ctrl_buf, q->buf, q->len);
 
-       if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL)))
+       if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
                err("submit(ctrl_urb) status %d", status);
 }
 
index cdd3ae486109419949612b0df069be7ac001c0a3..f33ca6aa29e9cb95a8277891298317ca810cefa7 100644 (file)
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define        DRIVER_VERSION                          "24-May-2011"
+#define        DRIVER_VERSION                          "01-June-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN           0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
        .disconnect = cdc_ncm_disconnect,
        .suspend = usbnet_suspend,
        .resume = usbnet_resume,
+       .reset_resume = usbnet_resume,
        .supports_autosuspend = 1,
 };
 
index 40398bf7d036fca1d1b56047f638375fdfd6f796..24297b274cd475ac855114394784c7eeaab2bcd0 100644 (file)
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
  * and sent on to some IP layer for further processing.
  */
 
-static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
+static void x25_asy_receive_buf(struct tty_struct *tty,
                                const unsigned char *cp, char *fp, int count)
 {
        struct x25_asy *sl = tty->disc_data;
-       int bytes = count;
 
        if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
                return;
 
 
        /* Read the characters out of the buffer */
-       while (bytes--) {
+       while (count--) {
                if (fp && *fp++) {
                        if (!test_and_set_bit(SLF_ERROR, &sl->flags))
                                sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
                }
                x25_asy_unesc(sl, *cp++);
        }
-
-       return count;
 }
 
 /*
index 22047628ccfa83bbd59242c511192e52ab04a12e..b6c5d3715b963e73f6b962070f6e78256449d107 100644 (file)
@@ -72,6 +72,11 @@ static int modparam_all_channels;
 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
 
+static int modparam_fastchanswitch;
+module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
+MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+
+
 /* Module info */
 MODULE_AUTHOR("Jiri Slaby");
 MODULE_AUTHOR("Nick Kossifidis");
@@ -2686,6 +2691,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
        struct ath5k_hw *ah = sc->ah;
        struct ath_common *common = ath5k_hw_common(ah);
        int ret, ani_mode;
+       bool fast;
 
        ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
 
@@ -2705,7 +2711,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
        ath5k_drain_tx_buffs(sc);
        if (chan)
                sc->curchan = chan;
-       ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
+
+       fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
+
+       ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
                                                                skip_pcu);
        if (ret) {
                ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
index 3510de2cf6224c6ec11b091cfb3f50dd1fccf6b9..126a4eab35f39d7a10995f0f20ff814a87def88c 100644 (file)
@@ -1124,8 +1124,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
                        /* Non fatal, can happen eg.
                         * on mode change */
                        ret = 0;
-               } else
+               } else {
+                       ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+                               "fast chan change successful\n");
                        return 0;
+               }
        }
 
        /*
index d9ff8413ab9af4e6e9634277cec7e58752c833b0..d9c08c619a3ab045fe7950aff6dc6f50d1f08b49 100644 (file)
@@ -26,7 +26,6 @@ config ATH9K
 config ATH9K_PCI
        bool "Atheros ath9k PCI/PCIe bus support"
        depends on ATH9K && PCI
-       default PCI
        ---help---
          This option enables the PCI bus support in ath9k.
 
index 015d97439935d05b0deb119471334929b7891f0f..2d4c0910295bd39b08cb4abdc845c140d6bc22de 100644 (file)
@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
        if (AR_SREV_9271(ah)) {
                if (!ar9285_hw_cl_cal(ah, chan))
                        return false;
-       } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+       } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
                if (!ar9285_hw_clc(ah, chan))
                        return false;
        } else {
index 0ca7635d0669c276683762f85cd493b3a21e5b87..ff8150e46f0e969a159d3b481cee5752432fcace 100644 (file)
@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
        case 1:
                break;
        case 2:
-               scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+               if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+                       scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+               else
+                       scaledPower = 0;
                break;
        case 3:
-               scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+               if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+                       scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+               else
+                       scaledPower = 0;
                break;
        }
 
index eee23ecd118a292f35e959018414d6a548ed74d4..892c48b15434569c7b74d12d426e4cdb1c15dcfb 100644 (file)
@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
                "==== BB update: done ====\n\n");
 }
 EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
+
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
+{
+       u32 val;
+
+       /* While receiving unsupported rate frame rx state machine
+        * gets into a state 0xb and if phy_restart happens in that
+        * state, BB would go hang. If RXSM is in 0xb state after
+        * first bb panic, ensure to disable the phy_restart.
+        */
+       if (!((MS(ah->bb_watchdog_last_status,
+                 AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
+           ah->bb_hang_rx_ofdm))
+               return;
+
+       ah->bb_hang_rx_ofdm = true;
+       val = REG_READ(ah, AR_PHY_RESTART);
+       val &= ~AR_PHY_RESTART_ENA;
+
+       REG_WRITE(ah, AR_PHY_RESTART, val);
+}
+EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
index 7856f0d4512d6292a1a70ef3f4f98360666f1847..343fc9f946dbc7dcb6b613cad3cff3b2bb12e84e 100644 (file)
@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
        case 1:
                break;
        case 2:
-               scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+               if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+                       scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+               else
+                       scaledPower = 0;
                break;
        case 3:
-               scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+               if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+                       scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+               else
+                       scaledPower = 0;
                break;
        }
        scaledPower = max((u16)0, scaledPower);
index 72543ce8f616a23b84d2949117ea65d1bd29982c..1be7c8bbef842f5cef15e10f60e47eb46d7d9aa7 100644 (file)
@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (ah->btcoex_hw.enabled)
                ath9k_hw_btcoex_enable(ah);
 
-       if (AR_SREV_9300_20_OR_LATER(ah))
+       if (AR_SREV_9300_20_OR_LATER(ah)) {
                ar9003_hw_bb_watchdog_config(ah);
 
+               ar9003_hw_disable_phy_restart(ah);
+       }
+
        ath9k_hw_apply_gpio_override(ah);
 
        return 0;
index 57435ce627928adaada552ce1aa5636f140ea8cd..4b157c53d1a8da11ec217a498c5bdf94b12d3e93 100644 (file)
@@ -842,6 +842,7 @@ struct ath_hw {
 
        u32 bb_watchdog_last_status;
        u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+       u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
 
        unsigned int paprd_target_power;
        unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
 void ar9003_paprd_enable(struct ath_hw *ah, bool val);
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
                                        struct ath9k_hw_cal_data *caldata,
index a198ee374b050b6d331107510be54b0618904ccd..2ca351fe6d3c0b056d4ecb8be14388aac195e85b 100644 (file)
@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
        u32 status = sc->intrstatus;
        u32 rxmask;
 
-       if (status & ATH9K_INT_FATAL) {
+       if ((status & ATH9K_INT_FATAL) ||
+           (status & ATH9K_INT_BB_WATCHDOG)) {
                ath_reset(sc, true);
                return;
        }
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 {
 #define SCHED_INTR (                           \
                ATH9K_INT_FATAL |               \
+               ATH9K_INT_BB_WATCHDOG |         \
                ATH9K_INT_RXORN |               \
                ATH9K_INT_RXEOL |               \
                ATH9K_INT_RX |                  \
index 17542214c93f65bfb1727547258f20e11c8440be..ba7f36ab0a74231eb82731e9292ffcd737601852 100644 (file)
@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
 
        if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
                rate->flags |= IEEE80211_TX_RC_MCS;
-               if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
+               if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
+                   conf_is_ht40(&txrc->hw->conf))
                        rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
                if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
                        rate->flags |= IEEE80211_TX_RC_SHORT_GI;
index 9ed65157bef554bcbcd1e857e3d80177a9af60bd..05960ddde24ee4fef9eb12801522eb95ea3e4825 100644 (file)
@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
        int freq;
        bool avoid = false;
        u8 length;
-       u16 tmp, core, type, count, max, numb, last, cmd;
+       u16 tmp, core, type, count, max, numb, last = 0, cmd;
        const u16 *table;
        bool phy6or5x;
 
index 7e5e85a017b5c0f9da0435f84cf94965b72a6ee3..a7a4739880dc91d7075b5eb6e1ee309e2dd16049 100644 (file)
@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
 
        /* rx_status carries information about the packet to mac80211 */
        rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
        rx_status.freq =
                ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
                                                        rx_status.band);
-       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
        rx_status.rate_idx =
                iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
        rx_status.flag = 0;
index f5433c74b845d54e6ac2869eb9664feb64bcd6bc..facc94e74b07991fa723b7144177beb4cc4bba93 100644 (file)
@@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
         * receive commit_rxon request
         * abort any previous channel switch if still in process
         */
-       if (priv->switch_rxon.switch_in_progress &&
-           (priv->switch_rxon.channel != ctx->staging.channel)) {
+       if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+           (priv->switch_channel != ctx->staging.channel)) {
                IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                     le16_to_cpu(priv->switch_rxon.channel));
+                     le16_to_cpu(priv->switch_channel));
                iwl_legacy_chswitch_done(priv, false);
        }
 
@@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
 
                memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
                iwl_legacy_print_rx_config_cmd(priv, ctx);
-               return 0;
+               goto set_tx_power;
        }
 
        /* If we are currently associated and the new config requires
@@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
 
        iwl4965_init_sensitivity(priv);
 
+set_tx_power:
        /* If we issue a new RXON command which required a tune then we must
         * send a new TXPOWER command or we won't be able to Tx any frames */
        ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
                return rc;
        }
 
-       priv->switch_rxon.channel = cmd.channel;
-       priv->switch_rxon.switch_in_progress = true;
-
        return iwl_legacy_send_cmd_pdu(priv,
                         REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 }
@@ -1543,7 +1541,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
        s32 temp;
 
        temp = iwl4965_hw_get_temperature(priv);
-       if (temp < 0)
+       if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
                return;
 
        if (priv->temperature != temp) {
index 42df8321dae807ffc616e3d48f19ce4e45323584..3be76bd5499a960f5e7685953a18074c44504525 100644 (file)
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       if (priv->switch_rxon.switch_in_progress) {
+       if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                ieee80211_chswitch_done(ctx->vif, is_success);
-               mutex_lock(&priv->mutex);
-               priv->switch_rxon.switch_in_progress = false;
-               mutex_unlock(&priv->mutex);
-       }
 }
 EXPORT_SYMBOL(iwl_legacy_chswitch_done);
 
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
 
-       if (priv->switch_rxon.switch_in_progress) {
-               if (!le32_to_cpu(csa->status) &&
-                   (csa->channel == priv->switch_rxon.channel)) {
-                       rxon->channel = csa->channel;
-                       ctx->staging.channel = csa->channel;
-                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                             le16_to_cpu(csa->channel));
-                       iwl_legacy_chswitch_done(priv, true);
-               } else {
-                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+       if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+               return;
+
+       if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+               rxon->channel = csa->channel;
+               ctx->staging.channel = csa->channel;
+               IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
                              le16_to_cpu(csa->channel));
-                       iwl_legacy_chswitch_done(priv, false);
-               }
+               iwl_legacy_chswitch_done(priv, true);
+       } else {
+               IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                       le16_to_cpu(csa->channel));
+               iwl_legacy_chswitch_done(priv, false);
        }
 }
 EXPORT_SYMBOL(iwl_legacy_rx_csa);
index bc66c604106cb990e79b73a337395523c1d60973..c5fbda0760dea06c28f9a6c5411c87428defad07 100644 (file)
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
 #define STATUS_SCAN_HW         15
 #define STATUS_POWER_PMI       16
 #define STATUS_FW_ERROR                17
-
+#define STATUS_CHANNEL_SWITCH_PENDING 18
 
 static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
 {
index be0106c6a2daad00486a6e6fac360172c5943e8c..ea30122669ee718906991ec53de2b9a50a1fb21d 100644 (file)
@@ -854,17 +854,6 @@ struct traffic_stats {
 #endif
 };
 
-/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
-       bool switch_in_progress;
-       __le16 channel;
-};
-
 /*
  * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
  * to perform continuous uCode event logging operation if enabled
@@ -1115,7 +1104,7 @@ struct iwl_priv {
 
        struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
-       struct iwl_switch_rxon switch_rxon;
+       __le16 switch_channel;
 
        /* 1st responses from initialize and runtime uCode images.
         * _4965's initialize alive response contains some calibration data. */
index af2ae22fcfd32c22ab0b28f1bddcc64b7f873c46..7157ba52968033642953a3af849d4cd706b1694c 100644 (file)
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
                goto out;
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status))
+           test_bit(STATUS_SCANNING, &priv->status) ||
+           test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                goto out;
 
        if (!iwl_legacy_is_associated_ctx(ctx))
                goto out;
 
-       /* channel switch in progress */
-       if (priv->switch_rxon.switch_in_progress == true)
-               goto out;
-
        if (priv->cfg->ops->lib->set_channel_switch) {
 
                ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
                         * at this point, staging_rxon has the
                         * configuration for channel switch
                         */
-                       if (priv->cfg->ops->lib->set_channel_switch(priv,
-                                                                   ch_switch))
-                               priv->switch_rxon.switch_in_progress = false;
+                       set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+                       priv->switch_channel = cpu_to_le16(ch);
+                       if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+                               clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+                                         &priv->status);
+                               priv->switch_channel = 0;
+                               ieee80211_chswitch_done(ctx->vif, false);
+                       }
                }
        }
 out:
        mutex_unlock(&priv->mutex);
-       if (!priv->switch_rxon.switch_in_progress)
-               ieee80211_chswitch_done(ctx->vif, false);
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
index 86feec86d13009c9b6266cd7bf102b8f3195af94..2282279cffc454c5089277a6cb1f279a60ad2dab 100644 (file)
@@ -177,79 +177,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
        return 0;
 }
 
-static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
-                                    struct ieee80211_channel_switch *ch_switch)
-{
-       /*
-        * MULTI-FIXME
-        * See iwl_mac_channel_switch.
-        */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl6000_channel_switch_cmd cmd;
-       const struct iwl_channel_info *ch_info;
-       u32 switch_time_in_usec, ucode_switch_time;
-       u16 ch;
-       u32 tsf_low;
-       u8 switch_count;
-       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-       struct ieee80211_vif *vif = ctx->vif;
-       struct iwl_host_cmd hcmd = {
-               .id = REPLY_CHANNEL_SWITCH,
-               .len = { sizeof(cmd), },
-               .flags = CMD_SYNC,
-               .data = { &cmd, },
-       };
-
-       cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-       ch = ch_switch->channel->hw_value;
-       IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
-               ctx->active.channel, ch);
-       cmd.channel = cpu_to_le16(ch);
-       cmd.rxon_flags = ctx->staging.flags;
-       cmd.rxon_filter_flags = ctx->staging.filter_flags;
-       switch_count = ch_switch->count;
-       tsf_low = ch_switch->timestamp & 0x0ffffffff;
-       /*
-        * calculate the ucode channel switch time
-        * adding TSF as one of the factor for when to switch
-        */
-       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-                   beacon_interval)) {
-                       switch_count -= (priv->ucode_beacon_time -
-                               tsf_low) / beacon_interval;
-               } else
-                       switch_count = 0;
-       }
-       if (switch_count <= 1)
-               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-       else {
-               switch_time_in_usec =
-                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-               ucode_switch_time = iwl_usecs_to_beacons(priv,
-                                               switch_time_in_usec,
-                                               beacon_interval);
-               cmd.switch_time = iwl_add_beacon_time(priv,
-                                               priv->ucode_beacon_time,
-                                               ucode_switch_time,
-                                               beacon_interval);
-       }
-       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-                     cmd.switch_time);
-       ch_info = iwl_get_channel_info(priv, priv->band, ch);
-       if (ch_info)
-               cmd.expect_beacon = is_channel_radar(ch_info);
-       else {
-               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-                       ctx->active.channel, ch);
-               return -EFAULT;
-       }
-       priv->switch_rxon.channel = cmd.channel;
-       priv->switch_rxon.switch_in_progress = true;
-
-       return iwl_send_cmd_sync(priv, &hcmd);
-}
-
 static struct iwl_lib_ops iwl2000_lib = {
        .set_hw_params = iwl2000_hw_set_hw_params,
        .rx_handler_setup = iwlagn_rx_handler_setup,
@@ -258,7 +185,6 @@ static struct iwl_lib_ops iwl2000_lib = {
        .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
        .send_tx_power = iwlagn_send_tx_power,
        .update_chain_flags = iwl_update_chain_flags,
-       .set_channel_switch = iwl2030_hw_channel_switch,
        .apm_ops = {
                .init = iwl_apm_init,
                .config = iwl2000_nic_config,
index a70b8cfafda195a3b92e1f46d2a7e617c3faba0f..f99f9c1933524e6d06af64f191341fd67057dbce 100644 (file)
@@ -331,8 +331,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
                        ctx->active.channel, ch);
                return -EFAULT;
        }
-       priv->switch_rxon.channel = cmd.channel;
-       priv->switch_rxon.switch_in_progress = true;
 
        return iwl_send_cmd_sync(priv, &hcmd);
 }
@@ -425,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
 };
 static struct iwl_ht_params iwl5000_ht_params = {
        .ht_greenfield_support = true,
-       .use_rts_for_aggregation = true, /* use rts/cts protection */
 };
 
 #define IWL_DEVICE_5000                                                \
index f8c710db6e6f90345a8a1f42e7ce1b2d8a7d85aa..fbe565c816e32ce367c09dad8ef5762e4c819d86 100644 (file)
@@ -270,8 +270,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
                        ctx->active.channel, ch);
                return -EFAULT;
        }
-       priv->switch_rxon.channel = cmd.channel;
-       priv->switch_rxon.switch_in_progress = true;
 
        return iwl_send_cmd_sync(priv, &hcmd);
 }
@@ -603,19 +601,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
        IWL_DEVICE_6050,
 };
 
+#define IWL_DEVICE_6150                                                \
+       .fw_name_pre = IWL6050_FW_PRE,                          \
+       .ucode_api_max = IWL6050_UCODE_API_MAX,                 \
+       .ucode_api_min = IWL6050_UCODE_API_MIN,                 \
+       .ops = &iwl6150_ops,                                    \
+       .eeprom_ver = EEPROM_6150_EEPROM_VERSION,               \
+       .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,       \
+       .base_params = &iwl6050_base_params,                    \
+       .need_dc_calib = true,                                  \
+       .led_mode = IWL_LED_BLINK,                              \
+       .internal_wimax_coex = true
+
 struct iwl_cfg iwl6150_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
-       .fw_name_pre = IWL6050_FW_PRE,
-       .ucode_api_max = IWL6050_UCODE_API_MAX,
-       .ucode_api_min = IWL6050_UCODE_API_MIN,
-       .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
-       .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
-       .ops = &iwl6150_ops,
-       .base_params = &iwl6050_base_params,
+       IWL_DEVICE_6150,
        .ht_params = &iwl6000_ht_params,
-       .need_dc_calib = true,
-       .led_mode = IWL_LED_RF_STATE,
-       .internal_wimax_coex = true,
+};
+
+struct iwl_cfg iwl6150_bg_cfg = {
+       .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+       IWL_DEVICE_6150,
 };
 
 struct iwl_cfg iwl6000_3agn_cfg = {
index b12c72d63ccb94fd0e2628667ed8b1bbb3b2d5fd..23fa93deae96cce98916a467f68954845cc19759 100644 (file)
@@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
                                     __le16 fc, __le32 *tx_flags)
 {
        if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
-           info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+           info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+           info->flags & IEEE80211_TX_CTL_AMPDU)
                *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-               return;
-       }
-
-       if (priv->cfg->ht_params &&
-           priv->cfg->ht_params->use_rts_for_aggregation &&
-           info->flags & IEEE80211_TX_CTL_AMPDU) {
-               *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-               return;
-       }
 }
 
 /* Calc max signal level (dBm) among 3 possible receivers */
index a95ad84c537752c7c2c9491cc29eb8531c841b38..09f679d6046f94ddc4ed8be2ae9b5fd12e1b64a4 100644 (file)
@@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                        return 0;
        }
 
+       /*
+        * force CTS-to-self frames protection if RTS-CTS is not preferred
+        * one aggregation protection method
+        */
+       if (!(priv->cfg->ht_params &&
+             priv->cfg->ht_params->use_rts_for_aggregation))
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -342,10 +350,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
         * receive commit_rxon request
         * abort any previous channel switch if still in process
         */
-       if (priv->switch_rxon.switch_in_progress &&
-           (priv->switch_rxon.channel != ctx->staging.channel)) {
+       if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+           (priv->switch_channel != ctx->staging.channel)) {
                IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                     le16_to_cpu(priv->switch_rxon.channel));
+                             le16_to_cpu(priv->switch_channel));
                iwl_chswitch_done(priv, false);
        }
 
@@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                }
 
                memcpy(active, &ctx->staging, sizeof(*active));
+               /*
+                * We do not commit tx power settings while channel changing,
+                * do it now if after settings changed.
+                */
+               iwl_set_tx_power(priv, priv->tx_power_next, false);
                return 0;
        }
 
index 11c6c1169e78ad980df93572feed15184a9f2295..8e1942ebd9a07aedc7c5e4c69a04b4ff690070a6 100644 (file)
@@ -2843,16 +2843,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
                goto out;
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status))
+           test_bit(STATUS_SCANNING, &priv->status) ||
+           test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                goto out;
 
        if (!iwl_is_associated_ctx(ctx))
                goto out;
 
-       /* channel switch in progress */
-       if (priv->switch_rxon.switch_in_progress == true)
-               goto out;
-
        if (priv->cfg->ops->lib->set_channel_switch) {
 
                ch = channel->hw_value;
@@ -2901,15 +2898,19 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
                         * at this point, staging_rxon has the
                         * configuration for channel switch
                         */
+                       set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+                       priv->switch_channel = cpu_to_le16(ch);
                        if (priv->cfg->ops->lib->set_channel_switch(priv,
-                                                                   ch_switch))
-                               priv->switch_rxon.switch_in_progress = false;
+                                                                   ch_switch)) {
+                               clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+                                         &priv->status);
+                               priv->switch_channel = 0;
+                               ieee80211_chswitch_done(ctx->vif, false);
+                       }
                }
        }
 out:
        mutex_unlock(&priv->mutex);
-       if (!priv->switch_rxon.switch_in_progress)
-               ieee80211_chswitch_done(ctx->vif, false);
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
@@ -3831,11 +3832,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6150 WiFi/WiMax Series */
        {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
        {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
        {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
 
 /* 1000 Series WiFi */
        {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
index 2495fe7a58cbb5f7d61399c6dacb8ebf358ede73..d1716844002eae2ea5ded5e2fe9724317964ffb8 100644 (file)
@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
 extern struct iwl_cfg iwl6050_2agn_cfg;
 extern struct iwl_cfg iwl6050_2abg_cfg;
 extern struct iwl_cfg iwl6150_bgn_cfg;
+extern struct iwl_cfg iwl6150_bg_cfg;
 extern struct iwl_cfg iwl1000_bgn_cfg;
 extern struct iwl_cfg iwl1000_bg_cfg;
 extern struct iwl_cfg iwl100_bgn_cfg;
index 4653deada05b48b1383f1cda6b33c7477a1717d3..213c80c6a6682d000efff4b0d356cb5bafa691d4 100644 (file)
@@ -843,12 +843,8 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       if (priv->switch_rxon.switch_in_progress) {
+       if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                ieee80211_chswitch_done(ctx->vif, is_success);
-               mutex_lock(&priv->mutex);
-               priv->switch_rxon.switch_in_progress = false;
-               mutex_unlock(&priv->mutex);
-       }
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index 3bb76f6ea41017448c5cc5ce99851c967d66dafe..a54d416ec345ce1b71d657de3ec92d4c5efb8ea6 100644 (file)
@@ -560,6 +560,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
 #define STATUS_POWER_PMI       16
 #define STATUS_FW_ERROR                17
 #define STATUS_DEVICE_ENABLED  18
+#define STATUS_CHANNEL_SWITCH_PENDING 19
 
 
 static inline int iwl_is_ready(struct iwl_priv *priv)
index 22a6e3ec7094e651b6afbe8245b778b25881bd77..c8de236c141be303d7ff6db5d058f4bfb093687f 100644 (file)
@@ -981,17 +981,6 @@ struct traffic_stats {
 #endif
 };
 
-/*
- * iwl_switch_rxon: "channel switch" structure
- *
- * @ switch_in_progress: channel switch in progress
- * @ channel: new channel
- */
-struct iwl_switch_rxon {
-       bool switch_in_progress;
-       __le16 channel;
-};
-
 /*
  * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
  * to perform continuous uCode event logging operation if enabled
@@ -1287,7 +1276,7 @@ struct iwl_priv {
 
        struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
-       struct iwl_switch_rxon switch_rxon;
+       __le16 switch_channel;
 
        struct {
                u32 error_event_table;
index 0053e9ea9021a433b98025701fcc1e9d703ea1f3..b774517aa9fa11dbe95c2f666ab88df0b915b1d9 100644 (file)
@@ -250,19 +250,19 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
-       if (priv->switch_rxon.switch_in_progress) {
-               if (!le32_to_cpu(csa->status) &&
-                   (csa->channel == priv->switch_rxon.channel)) {
-                       rxon->channel = csa->channel;
-                       ctx->staging.channel = csa->channel;
-                       IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                             le16_to_cpu(csa->channel));
-                       iwl_chswitch_done(priv, true);
-               } else {
-                       IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+       if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+               return;
+
+       if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+               rxon->channel = csa->channel;
+               ctx->staging.channel = csa->channel;
+               IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
                              le16_to_cpu(csa->channel));
-                       iwl_chswitch_done(priv, false);
-               }
+               iwl_chswitch_done(priv, true);
+       } else {
+               IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+                       le16_to_cpu(csa->channel));
+               iwl_chswitch_done(priv, false);
        }
 }
 
index 84566db486d21291196c353327a68fec6fd05dba..71c8f3fccfa1474b3dfe196b3785b75a0d8417eb 100644 (file)
@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
        cmd = cmdnode->cmdbuf;
 
        spin_lock_irqsave(&priv->driver_lock, flags);
+       priv->seqnum++;
+       cmd->seqnum = cpu_to_le16(priv->seqnum);
        priv->cur_cmd = cmdnode;
        spin_unlock_irqrestore(&priv->driver_lock, flags);
 
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
        /* Copy the incoming command to the buffer */
        memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
 
-       /* Set sequence number, clean result, move to buffer */
-       priv->seqnum++;
+       /* Set command, clean result, move to buffer */
        cmdnode->cmdbuf->command = cpu_to_le16(command);
        cmdnode->cmdbuf->size    = cpu_to_le16(in_cmd_size);
-       cmdnode->cmdbuf->seqnum  = cpu_to_le16(priv->seqnum);
        cmdnode->cmdbuf->result  = 0;
 
        lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
index a7b5cb0c2753845a581543e2ccca7963993592b0..224e9853c480605400a129fdda30b8298ca2d2bf 100644 (file)
@@ -907,7 +907,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
        card = sdio_get_drvdata(func);
 
        cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
-       if (ret)
+       if (ret || !cause)
                goto out;
 
        lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
@@ -1008,10 +1008,6 @@ static int if_sdio_probe(struct sdio_func *func,
        if (ret)
                goto release;
 
-       ret = sdio_claim_irq(func, if_sdio_interrupt);
-       if (ret)
-               goto disable;
-
        /* For 1-bit transfers to the 8686 model, we need to enable the
         * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
         * bit to allow access to non-vendor registers. */
@@ -1082,6 +1078,21 @@ static int if_sdio_probe(struct sdio_func *func,
        else
                card->rx_unit = 0;
 
+       /*
+        * Set up the interrupt handler late.
+        *
+        * If we set it up earlier, the (buggy) hardware generates a spurious
+        * interrupt, even before the interrupt has been enabled, with
+        * CCCR_INTx = 0.
+        *
+        * We register the interrupt handler late so that we can handle any
+        * spurious interrupts, and also to avoid generation of that known
+        * spurious interrupt in the first place.
+        */
+       ret = sdio_claim_irq(func, if_sdio_interrupt);
+       if (ret)
+               goto disable;
+
        /*
         * Enable interrupts now that everything is set up
         */
index a0e9bc5253e06dd8001dd2733d3c85f0e34046a0..4e97e90aa3994cacc80cadbd034caa581549011e 100644 (file)
 /* Rx unit register */
 #define CARD_RX_UNIT_REG               0x63
 
-/* Event header Len*/
-#define MWIFIEX_EVENT_HEADER_LEN           8
+/* Event header len w/o 4 bytes of interface header */
+#define MWIFIEX_EVENT_HEADER_LEN           4
 
 /* Max retry number of CMD53 write */
 #define MAX_WRITE_IOMEM_RETRY          2
index 9def1e5369a1c9dcfcbbee164444902efd16884b..b2f8b8fd4d2dc911dbb4f590430f8caa60dc70ab 100644 (file)
@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
 config RT2800USB_RT53XX
        bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
        depends on EXPERIMENTAL
-       default y
        ---help---
          This adds support for rt53xx wireless chipset family to the
          rt2800pci driver.
index 555180d8f4aa7055aa77a417b75fa894898964a8..b704e5b183d0bc539e8bec6d08776774854d5479 100644 (file)
@@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
        if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
                rt2x00link_reset_tuner(rt2x00dev, false);
 
-       if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
+       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+           test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
            (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
            (conf->flags & IEEE80211_CONF_PS)) {
                beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
index c018d67aab8e40b7a5c719d04f14eb52a4853d9b..939821b4af2ff5b768913cc0809b05695801285c 100644 (file)
@@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, autowakeup_work.work);
 
+       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+               return;
+
        if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
                ERROR(rt2x00dev, "Device failed to wakeup.\n");
        clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
         * Stop all work.
         */
        cancel_work_sync(&rt2x00dev->intf_work);
+       cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
        if (rt2x00_is_usb(rt2x00dev)) {
                del_timer_sync(&rt2x00dev->txstatus_timer);
                cancel_work_sync(&rt2x00dev->rxdone_work);
index a4095284543677d28fc39852361ede477546f654..9f8ccae93317d92cf353cb595a189df0aa1960e9 100644 (file)
@@ -669,6 +669,14 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                                         &rx_status,
                                                         (u8 *) pdesc, skb);
 
+                       new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+                       if (unlikely(!new_skb)) {
+                               RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+                                        DBG_DMESG,
+                                        ("can't alloc skb for rx\n"));
+                               goto done;
+                       }
+
                        pci_unmap_single(rtlpci->pdev,
                                         *((dma_addr_t *) skb->cb),
                                         rtlpci->rxbuffersize,
@@ -690,7 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        hdr = rtl_get_hdr(skb);
                        fc = rtl_get_fc(skb);
 
-                       if (!stats.crc || !stats.hwerror) {
+                       if (!stats.crc && !stats.hwerror) {
                                memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                                       sizeof(rx_status));
 
@@ -758,15 +766,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                rtl_lps_leave(hw);
                        }
 
-                       new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-                       if (unlikely(!new_skb)) {
-                               RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-                                        DBG_DMESG,
-                                        ("can't alloc skb for rx\n"));
-                               goto done;
-                       }
                        skb = new_skb;
-                       /*skb->dev = dev; */
 
                        rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
                                                             rx_ring
@@ -1113,6 +1113,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
 
                rtlpci->rx_ring[rx_queue_idx].idx = 0;
 
+               /* If amsdu_8k is disabled, set buffersize to 4096. This
+                * change will reduce memory fragmentation.
+                */
+               if (rtlpci->rxbuffersize > 4096 &&
+                   rtlpriv->rtlhal.disable_amsdu_8k)
+                       rtlpci->rxbuffersize = 4096;
+
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        struct sk_buff *skb =
                            dev_alloc_skb(rtlpci->rxbuffersize);
index 1ab6c86aac40d201d8f973c85367edf30831feba..c83fefb6662f3a2a433294c01e210098241e3c00 100644 (file)
@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
        /* time to wait on the channel for passive scans (in TUs) */
        u32 dwell_time_passive;
 
+       /* time to wait on the channel for DFS scans (in TUs) */
+       u32 dwell_time_dfs;
+
        /* number of probe requests to send on each channel in active scans */
        u8 num_probe_reqs;
 
index bc00e52f6445daec57326299829eee976d62a4bf..e6497dc669df096a57bc914e515b9212c18784c8 100644 (file)
@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
                .min_dwell_time_active = 8,
                .max_dwell_time_active = 30,
                .dwell_time_passive    = 100,
+               .dwell_time_dfs        = 150,
                .num_probe_reqs        = 2,
                .rssi_threshold        = -90,
                .snr_threshold         = 0,
index f37e5a3919763b45f5b2f96f4f9b9d6d54e3f9ad..56f76abc754d170b8982961df3013a64ebd24ba0 100644 (file)
@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
        struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
        int i, j;
        u32 flags;
+       bool force_passive = !req->n_ssids;
 
        for (i = 0, j = start;
             i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
             i++) {
                flags = req->channels[i]->flags;
 
-               if (!(flags & IEEE80211_CHAN_DISABLED) &&
-                   ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
-                   ((flags & IEEE80211_CHAN_RADAR) == radar) &&
-                   (req->channels[i]->band == band)) {
+               if (force_passive)
+                       flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+               if ((req->channels[i]->band == band) &&
+                   !(flags & IEEE80211_CHAN_DISABLED) &&
+                   (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
+                   /* if radar is set, we ignore the passive flag */
+                   (radar ||
+                    !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
                        wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
                                     req->channels[i]->band,
                                     req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
                        wl1271_debug(DEBUG_SCAN, "max_power %d",
                                     req->channels[i]->max_power);
 
-                       if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+                       if (flags & IEEE80211_CHAN_RADAR) {
+                               channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+                               channels[j].passive_duration =
+                                       cpu_to_le16(c->dwell_time_dfs);
+                       }
+                       else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
                                channels[j].passive_duration =
                                        cpu_to_le16(c->dwell_time_passive);
                        } else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
                                channels[j].max_duration =
                                        cpu_to_le16(c->max_dwell_time_active);
                        }
-                       channels[j].tx_power_att = req->channels[j]->max_power;
+                       channels[j].tx_power_att = req->channels[i]->max_power;
                        channels[j].channel = req->channels[i]->hw_value;
 
                        j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
                wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
                                                    IEEE80211_BAND_2GHZ,
                                                    false, false, idx);
-       idx += cfg->active[0];
+       /*
+        * 5GHz channels always start at position 14, not immediately
+        * after the last 2.4GHz channel
+        */
+       idx = 14;
 
        cfg->passive[1] =
                wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
                                                    false, true, idx);
        idx += cfg->passive[1];
 
-       cfg->active[1] =
+       cfg->dfs =
                wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
                                                    IEEE80211_BAND_5GHZ,
-                                                   false, false, 14);
-       idx += cfg->active[1];
+                                                   true, true, idx);
+       idx += cfg->dfs;
 
-       cfg->dfs =
+       cfg->active[1] =
                wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
                                                    IEEE80211_BAND_5GHZ,
-                                                   true, false, idx);
-       idx += cfg->dfs;
+                                                   false, false, idx);
+       idx += cfg->active[1];
 
        wl1271_debug(DEBUG_SCAN, "    2.4GHz: active %d passive %d",
                     cfg->active[0], cfg->passive[0]);
        wl1271_debug(DEBUG_SCAN, "    5GHz: active %d passive %d",
                     cfg->active[1], cfg->passive[1]);
+       wl1271_debug(DEBUG_SCAN, "    DFS: %d", cfg->dfs);
 
        return idx;
 }
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        struct wl1271_cmd_sched_scan_config *cfg = NULL;
        struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
        int i, total_channels, ret;
+       bool force_passive = !req->n_ssids;
 
        wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
 
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
                cfg->intervals[i] = cpu_to_le32(req->interval);
 
-       if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
+       if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
                cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
                cfg->ssid_len = req->ssids[0].ssid_len;
                memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                goto out;
        }
 
-       if (cfg->active[0]) {
+       if (!force_passive && cfg->active[0]) {
                ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                }
        }
 
-       if (cfg->active[1]) {
+       if (!force_passive && cfg->active[1]) {
                ret = wl1271_cmd_build_probe_req(wl,  req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_5GHZ],
index c83319579ca33a4a9e4f6f5767ceb0c546a87040..a0b6c5d67b0745218bb99396115c4386c0048b4e 100644 (file)
@@ -137,6 +137,9 @@ enum {
        SCAN_BSS_TYPE_ANY,
 };
 
+#define SCAN_CHANNEL_FLAGS_DFS         BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
+
 struct conn_scan_ch_params {
        __le16 min_duration;
        __le16 max_duration;
index 0e819943b9e479879dae4fc3f368760346feabfe..631194d498288aecff07f190cb76500ae107cd3f 100644 (file)
@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
 module_init(usb_init);
 module_exit(usb_exit);
 
+static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
+                             int *actual_length, int timeout)
+{
+       /* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
+        * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
+        * descriptor.
+        */
+       struct usb_host_endpoint *ep;
+       unsigned int pipe;
+
+       pipe = usb_sndintpipe(udev, EP_REGS_OUT);
+       ep = usb_pipe_endpoint(udev, pipe);
+       if (!ep)
+               return -EINVAL;
+
+       if (usb_endpoint_xfer_int(&ep->desc)) {
+               return usb_interrupt_msg(udev, pipe, data, len,
+                                        actual_length, timeout);
+       } else {
+               pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
+               return usb_bulk_msg(udev, pipe, data, len, actual_length,
+                                   timeout);
+       }
+}
+
 static int usb_int_regs_length(unsigned int count)
 {
        return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
 
        udev = zd_usb_to_usbdev(usb);
        prepare_read_regs_int(usb);
-       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
-                             req, req_len, &actual_req_len, 50 /* ms */);
+       r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_interrupt_msg(). Error number %d\n", r);
+                       "error in zd_ep_regs_out_msg(). Error number %d\n", r);
                goto error;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
+               dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
                rw->value = cpu_to_le16(ioreqs[i].value);
        }
 
-       usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
-                        req, req_len, iowrite16v_urb_complete, usb,
-                        ep->desc.bInterval);
+       /* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
+        * endpoint is bulk. Select correct type URB by endpoint descriptor.
+        */
+       if (usb_endpoint_xfer_int(&ep->desc))
+               usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+                                req, req_len, iowrite16v_urb_complete, usb,
+                                ep->desc.bInterval);
+       else
+               usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
+                                 req, req_len, iowrite16v_urb_complete, usb);
+
        urb->transfer_flags |= URB_FREE_BUFFER;
 
        /* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
        }
 
        udev = zd_usb_to_usbdev(usb);
-       r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
-                             req, req_len, &actual_req_len, 50 /* ms */);
+       r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
        if (r) {
                dev_dbg_f(zd_usb_dev(usb),
-                       "error in usb_interrupt_msg(). Error number %d\n", r);
+                       "error in zd_ep_regs_out_msg(). Error number %d\n", r);
                goto out;
        }
        if (req_len != actual_req_len) {
-               dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
+               dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
                        " req_len %d != actual_req_len %d\n",
                        req_len, actual_req_len);
                r = -EIO;
index c85f744270a56664cb91a720624cfc97dc29c6ee..094308e41be558b525a2198b3c7ee98cddc82359 100644 (file)
@@ -51,6 +51,7 @@ obj-$(CONFIG_X86_VISWS) += setup-irq.o
 obj-$(CONFIG_MN10300) += setup-bus.o
 obj-$(CONFIG_MICROBLAZE) += setup-bus.o
 obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
 
 #
 # ACPI Related PCI FW Functions
index 59f17acf7f68b6d3ec6b4a4798a937f299bef8a6..f02c34d26d1b08226599cfa4c183da703b9486ed 100644 (file)
@@ -3388,7 +3388,7 @@ static void __init init_iommu_pm_ops(void)
 }
 
 #else
-static inline int init_iommu_pm_ops(void) { }
+static inline void init_iommu_pm_ops(void) {}
 #endif /* CONFIG_PM */
 
 /*
index 56098b3e17c054a678253c028221610dc17f3ff5..5f10c23dff943a515698f361278cd931d6bd5909 100644 (file)
@@ -3271,11 +3271,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
 }
 
 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
-                     unsigned int command_bits, bool change_bridge)
+                     unsigned int command_bits, u32 flags)
 {
        if (arch_set_vga_state)
                return arch_set_vga_state(dev, decode, command_bits,
-                                               change_bridge);
+                                               flags);
        return 0;
 }
 
index 435002dfc3caef7a418944e9e967af8a27c6b822..712baab3c83d58147237384fb168a4a6c3ceba1c 100644 (file)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
index a8d03aeb40516af8dbba5beb4f18ee8a37af1a71..e7f301da290286153180fc5f1a9e02cdd9bb5f01 100644 (file)
@@ -46,7 +46,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                caps.n_ext_ts = ptp->info->n_ext_ts;
                caps.n_per_out = ptp->info->n_per_out;
                caps.pps = ptp->info->pps;
-               err = copy_to_user((void __user *)arg, &caps, sizeof(caps));
+               if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
+                       err = -EFAULT;
                break;
 
        case PTP_EXTTS_REQUEST:
@@ -129,8 +130,10 @@ ssize_t ptp_read(struct posix_clock *pc,
                return -ERESTARTSYS;
        }
 
-       if (ptp->defunct)
+       if (ptp->defunct) {
+               mutex_unlock(&ptp->tsevq_mux);
                return -ENODEV;
+       }
 
        spin_lock_irqsave(&queue->lock, flags);
 
@@ -150,10 +153,8 @@ ssize_t ptp_read(struct posix_clock *pc,
 
        mutex_unlock(&ptp->tsevq_mux);
 
-       if (copy_to_user(buf, event, cnt)) {
-               mutex_unlock(&ptp->tsevq_mux);
+       if (copy_to_user(buf, event, cnt))
                return -EFAULT;
-       }
 
        return cnt;
 }
index f822e13dc04b43ca0902e273c6060f1f838fbfdb..ce2aabf5c550a2706bc39dd17ac2b1c5a94142e8 100644 (file)
@@ -1051,4 +1051,13 @@ config RTC_DRV_TILE
          Enable support for the Linux driver side of the Tilera
          hypervisor's real-time clock interface.
 
+config RTC_DRV_PUV3
+       tristate "PKUnity v3 RTC support"
+       depends on ARCH_PUV3
+       help
+         This enables support for the RTC in the PKUnity-v3 SoCs.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-puv3.
+
 endif # RTC_CLASS
index 213d725f16d464cc0cc7527a2041f2d468d899f8..0ffefe877bfadca0fcf1730beaff016adc268ea2 100644 (file)
@@ -78,6 +78,7 @@ obj-$(CONFIG_RTC_DRV_PCF50633)        += rtc-pcf50633.o
 obj-$(CONFIG_RTC_DRV_PL030)    += rtc-pl030.o
 obj-$(CONFIG_RTC_DRV_PL031)    += rtc-pl031.o
 obj-$(CONFIG_RTC_DRV_PS3)      += rtc-ps3.o
+obj-$(CONFIG_RTC_DRV_PUV3)     += rtc-puv3.o
 obj-$(CONFIG_RTC_DRV_PXA)      += rtc-pxa.o
 obj-$(CONFIG_RTC_DRV_R9701)    += rtc-r9701.o
 obj-$(CONFIG_RTC_DRV_RP5C01)   += rtc-rp5c01.o
index ef6316acec43a3312c00c6d8ca27b5ec8314f526..df68618f6dbb53f60f1f743f1425e2060e104854 100644 (file)
@@ -318,7 +318,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
        long now, scheduled;
index d0e06edb14c5157890079b08c4dcfab65b0ec66c..cace6d3aed9a59261bba613af9649b34699fb7a8 100644 (file)
@@ -421,7 +421,8 @@ static long rtc_dev_ioctl(struct file *file,
                        err = ops->ioctl(rtc->dev.parent, cmd, arg);
                        if (err == -ENOIOCTLCMD)
                                err = -ENOTTY;
-               }
+               } else
+                       err = -ENOTTY;
                break;
        }
 
index 1a84b3e227d1a449287af1efa5b10c7151782510..7317d3b9a3d54ffacdde1c40773eacf1309a075c 100644 (file)
@@ -189,7 +189,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
 
 static int __devexit m41t93_remove(struct spi_device *spi)
 {
-       struct rtc_device *rtc = platform_get_drvdata(spi);
+       struct rtc_device *rtc = spi_get_drvdata(spi);
 
        if (rtc)
                rtc_device_unregister(rtc);
similarity index 98%
rename from arch/unicore32/kernel/rtc.c
rename to drivers/rtc/rtc-puv3.c
index 8cad70b3302c86e08c8189e7c926b34c3ab78f2c..46f14b82f3ab6e987efef66d90181414e34d475c 100644 (file)
@@ -1,7 +1,5 @@
 /*
- * linux/arch/unicore32/kernel/rtc.c
- *
- * Code specific to PKUnity SoC and UniCore ISA
+ * RTC driver code specific to PKUnity SoC and UniCore ISA
  *
  *     Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
  *     Copyright (C) 2001-2010 Guan Xuetao
@@ -36,7 +34,6 @@ static int puv3_rtc_tickno  = IRQ_RTC;
 static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
 
 /* IRQ Handlers */
-
 static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
 {
        struct rtc_device *rdev = id;
@@ -89,7 +86,6 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
 }
 
 /* Time read/write */
-
 static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
 {
        rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
@@ -196,7 +192,6 @@ static void puv3_rtc_release(struct device *dev)
        struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
 
        /* do not clear AIE here, it may be needed for wake */
-
        puv3_rtc_setpie(dev, 0);
        free_irq(puv3_rtc_alarmno, rtc_dev);
        free_irq(puv3_rtc_tickno, rtc_dev);
@@ -218,7 +213,6 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
                writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
        } else {
                /* re-enable the device, and check it is ok */
-
                if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
                        dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
                        writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
@@ -251,7 +245,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
        pr_debug("%s: probe=%p\n", __func__, pdev);
 
        /* find the IRQs */
-
        puv3_rtc_tickno = platform_get_irq(pdev, 1);
        if (puv3_rtc_tickno < 0) {
                dev_err(&pdev->dev, "no irq for rtc tick\n");
@@ -268,7 +261,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
                 puv3_rtc_tickno, puv3_rtc_alarmno);
 
        /* get the memory region */
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "failed to get memory region resource\n");
@@ -288,7 +280,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
        puv3_rtc_enable(pdev, 1);
 
        /* register RTC and exit */
-
        rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
                                  THIS_MODULE);
 
@@ -315,8 +306,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-/* RTC Power management control */
-
 static int ticnt_save;
 
 static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
@@ -368,4 +357,3 @@ module_exit(puv3_rtc_exit);
 MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
 MODULE_AUTHOR("Hu Dongliang");
 MODULE_LICENSE("GPL v2");
-
index 55e8f721e38a7209e5875c60c1af7b3c1486fb1a..570d4da10696177e6a05ed78dbeb459617d8f49c 100644 (file)
@@ -416,7 +416,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
 
        /* special handling for no target buffer empty */
        if ((!q->is_input_q &&
-           (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
+           (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
                qperf_inc(q, target_full);
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                              q->first_to_check);
@@ -427,8 +427,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
        DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
        DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
        DBF_ERROR("F14:%2x F15:%2x",
-                 q->sbal[q->first_to_check]->element[14].flags & 0xff,
-                 q->sbal[q->first_to_check]->element[15].flags & 0xff);
+                 q->sbal[q->first_to_check]->element[14].sflags,
+                 q->sbal[q->first_to_check]->element[15].sflags);
 
        /*
         * Interrupts may be avoided as long as the error is present
index 55c6aa1c9704f54e5c6f26e21a7b1f5c7610dbe0..d3cee33e554cace96b035b82c80ac87fc1485b01 100644 (file)
@@ -361,7 +361,7 @@ enum qeth_header_ids {
 
 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
 {
-       return (sbale->flags & SBAL_FLAGS_LAST_ENTRY);
+       return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
 }
 
 enum qeth_qdio_buffer_states {
index 503678a30981f071f8e8ef9db25729817cc5d07a..dd08f7b42fb8ff26ebf91d943fa58d01376d6184 100644 (file)
@@ -890,7 +890,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
        struct sk_buff *skb;
 
        /* is PCI flag set on buffer? */
-       if (buf->buffer->element[0].flags & 0x40)
+       if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
                atomic_dec(&queue->set_pci_flags_count);
 
        skb = skb_dequeue(&buf->skb_list);
@@ -906,9 +906,11 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
                buf->is_header[i] = 0;
                buf->buffer->element[i].length = 0;
                buf->buffer->element[i].addr = NULL;
-               buf->buffer->element[i].flags = 0;
+               buf->buffer->element[i].eflags = 0;
+               buf->buffer->element[i].sflags = 0;
        }
-       buf->buffer->element[15].flags = 0;
+       buf->buffer->element[15].eflags = 0;
+       buf->buffer->element[15].sflags = 0;
        buf->next_element_to_fill = 0;
        atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
@@ -2368,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
                buf->buffer->element[i].length = PAGE_SIZE;
                buf->buffer->element[i].addr =  pool_entry->elements[i];
                if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
-                       buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY;
+                       buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
                else
-                       buf->buffer->element[i].flags = 0;
+                       buf->buffer->element[i].eflags = 0;
+               buf->buffer->element[i].sflags = 0;
        }
        return 0;
 }
@@ -2718,11 +2721,11 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
        if (qdio_error) {
                QETH_CARD_TEXT(card, 2, dbftext);
                QETH_CARD_TEXT_(card, 2, " F15=%02X",
-                              buf->element[15].flags & 0xff);
+                              buf->element[15].sflags);
                QETH_CARD_TEXT_(card, 2, " F14=%02X",
-                              buf->element[14].flags & 0xff);
+                              buf->element[14].sflags);
                QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
-               if ((buf->element[15].flags & 0xff) == 0x12) {
+               if ((buf->element[15].sflags) == 0x12) {
                        card->stats.rx_dropped++;
                        return 0;
                } else
@@ -2798,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
 static int qeth_handle_send_error(struct qeth_card *card,
                struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
 {
-       int sbalf15 = buffer->buffer->element[15].flags & 0xff;
+       int sbalf15 = buffer->buffer->element[15].sflags;
 
        QETH_CARD_TEXT(card, 6, "hdsnderr");
        if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2907,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 
        for (i = index; i < index + count; ++i) {
                buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
-               buf->buffer->element[buf->next_element_to_fill - 1].flags |=
-                               SBAL_FLAGS_LAST_ENTRY;
+               buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
+                               SBAL_EFLAGS_LAST_ENTRY;
 
                if (queue->card->info.type == QETH_CARD_TYPE_IQD)
                        continue;
@@ -2921,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                                /* it's likely that we'll go to packing
                                 * mode soon */
                                atomic_inc(&queue->set_pci_flags_count);
-                               buf->buffer->element[0].flags |= 0x40;
+                               buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
                        }
                } else {
                        if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2934,7 +2937,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                                 * further send was requested by the stack
                                 */
                                atomic_inc(&queue->set_pci_flags_count);
-                               buf->buffer->element[0].flags |= 0x40;
+                               buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
                        }
                }
        }
@@ -3180,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
                if (!length) {
                        if (first_lap)
                                if (skb_shinfo(skb)->nr_frags)
-                                       buffer->element[element].flags =
-                                               SBAL_FLAGS_FIRST_FRAG;
+                                       buffer->element[element].eflags =
+                                               SBAL_EFLAGS_FIRST_FRAG;
                                else
-                                       buffer->element[element].flags = 0;
+                                       buffer->element[element].eflags = 0;
                        else
-                               buffer->element[element].flags =
-                                   SBAL_FLAGS_MIDDLE_FRAG;
+                               buffer->element[element].eflags =
+                                   SBAL_EFLAGS_MIDDLE_FRAG;
                } else {
                        if (first_lap)
-                               buffer->element[element].flags =
-                                   SBAL_FLAGS_FIRST_FRAG;
+                               buffer->element[element].eflags =
+                                   SBAL_EFLAGS_FIRST_FRAG;
                        else
-                               buffer->element[element].flags =
-                                   SBAL_FLAGS_MIDDLE_FRAG;
+                               buffer->element[element].eflags =
+                                   SBAL_EFLAGS_MIDDLE_FRAG;
                }
                data += length_here;
                element++;
@@ -3205,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
                buffer->element[element].addr = (char *)page_to_phys(frag->page)
                        + frag->page_offset;
                buffer->element[element].length = frag->size;
-               buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+               buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
                element++;
        }
 
-       if (buffer->element[element - 1].flags)
-               buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
+       if (buffer->element[element - 1].eflags)
+               buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
        *next_element_to_fill = element;
 }
 
@@ -3234,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
                /*fill first buffer entry only with header information */
                buffer->element[element].addr = skb->data;
                buffer->element[element].length = hdr_len;
-               buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+               buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
                buf->next_element_to_fill++;
                skb->data += hdr_len;
                skb->len  -= hdr_len;
@@ -3246,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
                buffer->element[element].addr = hdr;
                buffer->element[element].length = sizeof(struct qeth_hdr) +
                                                        hd_len;
-               buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+               buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
                buf->is_header[element] = 1;
                buf->next_element_to_fill++;
        }
index 8512b5c0ef82868d0a5ac87b0d813b84abacff81..022fb6a8cb8339a6a8854b69ffc7021a3bdce1cf 100644 (file)
@@ -640,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
 }
 
 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
-                                               u32 fsf_cmd, u32 sbtype,
+                                               u32 fsf_cmd, u8 sbtype,
                                                mempool_t *pool)
 {
        struct zfcp_adapter *adapter = qdio->adapter;
@@ -841,7 +841,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
        if (zfcp_qdio_sbal_get(qdio))
                goto out;
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.scsi_abort);
        if (IS_ERR(req)) {
                req = NULL;
@@ -1012,7 +1012,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
-                                 SBAL_FLAGS0_TYPE_WRITE_READ, pool);
+                                 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
 
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
@@ -1110,7 +1110,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
-                                 SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
+                                 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
 
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
@@ -1156,7 +1156,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1198,7 +1198,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
                goto out_unlock;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-                                 SBAL_FLAGS0_TYPE_READ, NULL);
+                                 SBAL_SFLAGS0_TYPE_READ, NULL);
 
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
@@ -1250,7 +1250,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1296,7 +1296,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
                goto out_unlock;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
-                                 SBAL_FLAGS0_TYPE_READ, NULL);
+                                 SBAL_SFLAGS0_TYPE_READ, NULL);
 
        if (IS_ERR(req)) {
                retval = PTR_ERR(req);
@@ -1412,7 +1412,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1478,7 +1478,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1553,7 +1553,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1606,7 +1606,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1698,7 +1698,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1812,7 +1812,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -1901,7 +1901,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
-                                 SBAL_FLAGS0_TYPE_READ,
+                                 SBAL_SFLAGS0_TYPE_READ,
                                  qdio->adapter->pool.erp_req);
 
        if (IS_ERR(req)) {
@@ -2161,7 +2161,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 {
        struct zfcp_fsf_req *req;
        struct fcp_cmnd *fcp_cmnd;
-       unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
+       u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
        int real_bytes, retval = -EIO, dix_bytes = 0;
        struct scsi_device *sdev = scsi_cmnd->device;
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2181,7 +2181,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
        }
 
        if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
-               sbtype = SBAL_FLAGS0_TYPE_WRITE;
+               sbtype = SBAL_SFLAGS0_TYPE_WRITE;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
                                  sbtype, adapter->pool.scsi_req);
@@ -2280,7 +2280,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
                goto out;
 
        req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
-                                 SBAL_FLAGS0_TYPE_WRITE,
+                                 SBAL_SFLAGS0_TYPE_WRITE,
                                  qdio->adapter->pool.scsi_req);
 
        if (IS_ERR(req)) {
@@ -2328,17 +2328,18 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
        struct zfcp_qdio *qdio = adapter->qdio;
        struct zfcp_fsf_req *req = NULL;
        struct fsf_qtcb_bottom_support *bottom;
-       int direction, retval = -EIO, bytes;
+       int retval = -EIO, bytes;
+       u8 direction;
 
        if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
                return ERR_PTR(-EOPNOTSUPP);
 
        switch (fsf_cfdc->command) {
        case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-               direction = SBAL_FLAGS0_TYPE_WRITE;
+               direction = SBAL_SFLAGS0_TYPE_WRITE;
                break;
        case FSF_QTCB_UPLOAD_CONTROL_FILE:
-               direction = SBAL_FLAGS0_TYPE_READ;
+               direction = SBAL_SFLAGS0_TYPE_READ;
                break;
        default:
                return ERR_PTR(-EINVAL);
@@ -2413,7 +2414,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
                fsf_req->qdio_req.sbal_response = sbal_idx;
                zfcp_fsf_req_complete(fsf_req);
 
-               if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
+               if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
                        break;
        }
 }
index 98e97d90835b673e770af3911e876537644073b7..d9c40ea73eef4864e6a477d2803b61309e037a1a 100644 (file)
@@ -124,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 
        /* set last entry flag in current SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-       sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+       sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
 
        /* don't exceed last allowed SBAL */
        if (q_req->sbal_last == q_req->sbal_limit)
@@ -132,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 
        /* set chaining flag in first SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
-       sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
+       sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
 
        /* calculate index of next SBAL */
        q_req->sbal_last++;
@@ -147,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 
        /* set storage-block type for new SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-       sbale->flags |= q_req->sbtype;
+       sbale->sflags |= q_req->sbtype;
 
        return sbale;
 }
@@ -177,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
        /* set storage-block type for this request */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
-       sbale->flags |= q_req->sbtype;
+       sbale->sflags |= q_req->sbtype;
 
        for (; sg; sg = sg_next(sg)) {
                sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -384,7 +384,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
        for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
                sbale = &(qdio->res_q[cc]->element[0]);
                sbale->length = 0;
-               sbale->flags = SBAL_FLAGS_LAST_ENTRY;
+               sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
+               sbale->sflags = 0;
                sbale->addr = NULL;
        }
 
index 2297d8d3e947b0aa16ba1d4c0d68251c6eade33d..54e22ace012b601f5a3cc8dd3cc33505458b8cfd 100644 (file)
@@ -67,7 +67,7 @@ struct zfcp_qdio {
  * @qdio_outb_usage: usage of outbound queue
  */
 struct zfcp_qdio_req {
-       u32     sbtype;
+       u8      sbtype;
        u8      sbal_number;
        u8      sbal_first;
        u8      sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
  */
 static inline
 void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
-                       unsigned long req_id, u32 sbtype, void *data, u32 len)
+                       unsigned long req_id, u8 sbtype, void *data, u32 len)
 {
        struct qdio_buffer_element *sbale;
        int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->addr = (void *) req_id;
-       sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;
+       sbale->eflags = 0;
+       sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
 
        if (unlikely(!data))
                return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
        struct qdio_buffer_element *sbale;
 
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-       sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+       sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
 }
 
 /**
index 58584dc0724ade4fb4b2d5fc12d48b962d2b91bd..44e8ca398efa790083006131f5adb3bf37cd57aa 100644 (file)
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
                kfree(sdev);
                goto out;
        }
-
+       blk_get_queue(sdev->request_queue);
        sdev->request_queue->queuedata = sdev;
        scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 
index e63912510fb9349b31ede55df1c0b20011b6235e..e0bd3f790fca1bf50e4e66e3b2f30f9a130547f3 100644 (file)
@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
                kfree(evt);
        }
 
+       blk_put_queue(sdev->request_queue);
        /* NULL queue means the device can't be used */
        sdev->request_queue = NULL;
 
index 4f64183b27fa6283c80d4fa2d37aa61c61c127fe..7e9c39951ecb87c086c9ab439df893a5c0980752 100644 (file)
@@ -635,7 +635,7 @@ static void clks_core_resume(void)
        struct clk *clkp;
 
        list_for_each_entry(clkp, &clock_list, node) {
-               if (likely(clkp->ops)) {
+               if (likely(clkp->usecount && clkp->ops)) {
                        unsigned long rate = clkp->rate;
 
                        if (likely(clkp->ops->set_parent))
index 6a9e58dd36c7e17cd5d4a7bbd58ed10ab4476f32..d18ce9e946d8d085b9ecf0c8fefe9cc88cd99334 100644 (file)
@@ -1861,6 +1861,7 @@ static int pl022_setup(struct spi_device *spi)
        }
        if ((clk_freq.cpsdvsr < CPSDVR_MIN)
            || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
+               status = -EINVAL;
                dev_err(&spi->dev,
                        "cpsdvsr is configured incorrectly\n");
                goto err_config_params;
index 6f86ba0175ac9636e564f44cf23ff41223de6b8a..969cdd2fe124602d1e65c4218190ed3e7d78ccf6 100644 (file)
@@ -298,7 +298,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
        unsigned int            count, c;
        unsigned long           base, tx_reg, rx_reg;
        int                     word_len, data_type, element_count;
-       int                     elements;
+       int                     elements = 0;
        u32                     l;
        u8                      * rx;
        const u8                * tx;
index f706dba165cf6812fc364271d3086424ce12aa36..cc880c95e7de7dfee978319ebfb7aecd74a4f355 100644 (file)
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
        drv_data->cs_change = transfer->cs_change;
 
        /* Bits per word setup */
-       bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
-       if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+       bits_per_word = transfer->bits_per_word ? :
+               message->spi->bits_per_word ? : 8;
+       if (bits_per_word % 16 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = (transfer->len) >> 1;
                cr_width = BIT_CTL_WORDSIZE;
                drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
-       } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+       } else if (bits_per_word % 8 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = transfer->len;
                cr_width = 0;
index 82feb348c8bbf3762195aaf2cf07668792f72721..2a20dabec76d722d1c311aad9e1e34a13382b6d2 100644 (file)
@@ -539,10 +539,12 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
        if (!pc->hostmode)
                ssb_pcicore_init_clientmode(pc);
 
-       /* Additional always once-executed workarounds */
-       ssb_pcicore_serdes_workaround(pc);
-       /* TODO: ASPM */
-       /* TODO: Clock Request Update */
+       /* Additional PCIe always once-executed workarounds */
+       if (dev->id.coreid == SSB_DEV_PCIE) {
+               ssb_pcicore_serdes_workaround(pc);
+               /* TODO: ASPM */
+               /* TODO: Clock Request Update */
+       }
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
index dfc16f955eb882765aef90c63003d01ce95927f9..196284dc2f36615520aff66bf8174625e2c4da44 100644 (file)
@@ -24,23 +24,6 @@ menuconfig STAGING
 
 if STAGING
 
-config STAGING_EXCLUDE_BUILD
-       bool "Exclude Staging drivers from being built" if STAGING
-       default y
-       ---help---
-         Are you sure you really want to build the staging drivers?
-         They taint your kernel, don't live up to the normal Linux
-         kernel quality standards, are a bit crufty around the edges,
-         and might go off and kick your dog when you aren't paying
-         attention.
-
-         Say N here to be able to select and build the Staging drivers.
-         This option is primarily here to prevent them from being built
-         when selecting 'make allyesconfg' and 'make allmodconfig' so
-         don't be all that put off, your dog will be just fine.
-
-if !STAGING_EXCLUDE_BUILD
-
 source "drivers/staging/tty/Kconfig"
 
 source "drivers/staging/generic_serial/Kconfig"
@@ -177,5 +160,4 @@ source "drivers/staging/mei/Kconfig"
 
 source "drivers/staging/nvec/Kconfig"
 
-endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
index 876308858b824c706770eda11e4038d1763da24a..8b1620b1b2d0b0f614004726a3814155d039c543 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
 #include "altera-exprt.h"
 #include "altera-jtag.h"
 
index 05aad351b120c40c7e7ee3bde5d2b622a3fdd441..9cd5e76880c0cfcb5f3cdaedac093f2897c52d87 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/string.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
-#include <staging/altera.h>
+#include "altera.h"
 #include "altera-exprt.h"
 #include "altera-jtag.h"
 
index 1f15e1fb1ab224b6ed0c925463d9e9fed30c0da2..afd6cc16a2b8cf8f3ac8a374e1968a97b2a1db0b 100644 (file)
@@ -1,6 +1,7 @@
 config ATH6K_LEGACY
        tristate "Atheros AR6003 support (non mac80211)"
         depends on MMC && WLAN
+       depends on CFG80211
         select WIRELESS_EXT
         select WEXT_PRIV
        help
index 77dfb4070c1d11f470457d89c149345ddf524212..d3a774dbb7e8b3cec2faf25bde0270b0caad8772 100644 (file)
@@ -870,7 +870,8 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
     if(ar->scan_request)
     {
         /* Translate data to cfg80211 mgmt format */
-        wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
+       if (ar->arWmi)
+               wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
 
         cfg80211_scan_done(ar->scan_request,
             ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
index 929ceaf363bea7df2b8e51a4d114bcb1acaa5ff8..15e1b05ca92d2c835bcd3b16bb7f94ee5b1daab8 100644 (file)
@@ -64,8 +64,6 @@ wl_iw_extra_params_t g_wl_iw_params;
 extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
                                  u32 reason, char *stringBuf, uint buflen);
 
-uint wl_msg_level = WL_ERROR_VAL;
-
 #define MAX_WLIW_IOCTL_LEN 1024
 
 #ifdef CONFIG_WIRELESS_EXT
index 1c45c11a774eaa60921f4ca0d69cec7e7481f8ea..aa87b1b6a44adea18ca675ba7962dcf512dd61f8 100644 (file)
@@ -542,6 +542,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
        unsigned long irqflags;
        int ret = -ENOMEM;
        uint32_t tt_pages;
+       struct drm_connector *connector;
+       struct psb_intel_output *psb_intel_output;
 
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
@@ -663,7 +665,18 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
                drm_kms_helper_poll_init(dev);
        }
 
-       ret = psb_backlight_init(dev);
+       /* Only add backlight support if we have LVDS output */
+       list_for_each_entry(connector, &dev->mode_config.connector_list,
+                           head) {
+               psb_intel_output = to_psb_intel_output(connector);
+
+               switch (psb_intel_output->type) {
+               case INTEL_OUTPUT_LVDS:
+                       ret = psb_backlight_init(dev);
+                       break;
+               }
+       }
+
        if (ret)
                return ret;
 #if 0
index 99c03a2e06bdb32e8a20f579be0f4c975314805c..084c36bbfe86997428b52158dbedaa48c7d69611 100644 (file)
@@ -441,6 +441,16 @@ static int psbfb_create(struct psb_fbdev *fbdev,
        info->screen_size = size;
        memset(info->screen_base, 0, size);
 
+       if (dev_priv->pg->stolen_size) {
+               info->apertures = alloc_apertures(1);
+               if (!info->apertures) {
+                       ret = -ENOMEM;
+                       goto out_err0;
+               }
+               info->apertures->ranges[0].base = dev->mode_config.fb_base;
+               info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
+       }
+
        drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
        drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
                                sizes->fb_width, sizes->fb_height);
index 48ac8ba7f40bc41d5e317ec14cf3f897073e0742..417965da5e240002769ad700c3ee4208da4aa529 100644 (file)
@@ -154,10 +154,15 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
 
        fill_detail_timing_data(panel_fixed_mode, dvo_timing);
 
-       dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
-
-       DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
-       drm_mode_debug_printmodeline(panel_fixed_mode);
+       if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+               dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+               DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+               drm_mode_debug_printmodeline(panel_fixed_mode);
+       } else {
+               DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
+               dev_priv->lvds_vbt = 0;
+               kfree(panel_fixed_mode);
+       }
 
        return;
 }
index 0b9b85424dfaa1fdbe7b931e6fcf98f68ef9c9bf..4cc1a5bfab40c0705c31beca87757068a7b8487d 100644 (file)
@@ -81,7 +81,6 @@ struct adis16201_state {
 
 int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 enum adis16201_scan {
        ADIS16201_SCAN_SUPPLY,
        ADIS16201_SCAN_ACC_X,
@@ -92,6 +91,7 @@ enum adis16201_scan {
        ADIS16201_SCAN_INCLI_Y,
 };
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16201_remove_trigger(struct iio_dev *indio_dev);
 int adis16201_probe_trigger(struct iio_dev *indio_dev);
 
index 8bb8ce50c2483d339f584a2c83ad82c771ee2108..175e21bb9b403d79a0a2810c6730712791a1a62d 100644 (file)
@@ -76,7 +76,6 @@ struct adis16203_state {
 
 int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 enum adis16203_scan {
        ADIS16203_SCAN_SUPPLY,
        ADIS16203_SCAN_AUX_ADC,
@@ -85,6 +84,7 @@ enum adis16203_scan {
        ADIS16203_SCAN_INCLI_Y,
 };
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16203_remove_trigger(struct iio_dev *indio_dev);
 int adis16203_probe_trigger(struct iio_dev *indio_dev);
 
index 881768df47a6b813a417dcffe1a3f9cce2ecf898..2fe34d21b6aa2268658d4c6148ad07fa11087747 100644 (file)
@@ -195,7 +195,7 @@ static const struct iio_info max517_info = {
 };
 
 static const struct iio_info max518_info = {
-       .attrs = &max517_attribute_group,
+       .attrs = &max518_attribute_group,
        .driver_module = THIS_MODULE,
 };
 
index 2589a7e167e4e94772fe36e8b215ea904f0873ea..3612373ddede3ff90a642432d260f931c23729f1 100644 (file)
@@ -137,13 +137,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
                if (st->variant->flags & ADIS16400_NO_BURST) {
                        ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
                        if (ret < 0)
-                               return ret;
+                               goto err;
                        for (; i < ring->scan_count; i++)
                                data[i] = *(s16 *)(st->rx + i*2);
                } else {
                        ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
                        if (ret < 0)
-                               return ret;
+                               goto err;
                        for (; i < indio_dev->ring->scan_count; i++) {
                                j = __ffs(mask);
                                mask &= ~(1 << j);
@@ -158,9 +158,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
        ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
 
        iio_trigger_notify_done(indio_dev->trig);
-       kfree(data);
 
+       kfree(data);
        return IRQ_HANDLED;
+
+err:
+       kfree(data);
+       return ret;
 }
 
 void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
index 615902333fb0d9cb34b555c25a535362bb781042..d504aa251cedb5fe280224c6970ed94c137c7ecd 100644 (file)
@@ -294,6 +294,7 @@ struct iio_poll_func
        pf->h = h;
        pf->thread = thread;
        pf->type = type;
+       pf->private_data = private;
 
        return pf;
 }
index 2818851c07619288821dc6a48e73f7c72c903daa..d1ffa32cd141105b1f9fe4f064ef8b06a53b01e1 100644 (file)
@@ -205,10 +205,10 @@ int mei_hw_init(struct mei_device *dev)
                        "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
                        dev->host_hw_state, dev->me_hw_state);
 
-               if (!(dev->host_hw_state & H_RDY) != H_RDY)
+               if (!(dev->host_hw_state & H_RDY))
                        dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
 
-               if (!(dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
+               if (!(dev->me_hw_state & ME_RDY_HRA))
                        dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
 
                printk(KERN_ERR "mei: link layer initialization failed.\n");
index b05306766870d4f0f8410e5189eba2ace54f9185..fe40e0b6f6752b16b118ebc0940342d89bb559f9 100644 (file)
@@ -2,6 +2,7 @@ config FB_OLPC_DCON
        tristate "One Laptop Per Child Display CONtroller support"
        depends on OLPC && FB
        select I2C
+       select BACKLIGHT_CLASS_DEVICE
        ---help---
          Add support for the OLPC XO DCON controller.  This controller is
          only available on OLPC platforms.   Unless you have one of these
index bddb0312b31eb2543f2baa75751c6a6e5d5c6109..cdae497d54675dd2feefdec7628d1f645decc4e1 100644 (file)
@@ -2328,7 +2328,7 @@ Switch_Fail:
 
                        retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5);
                        if (retval == STATUS_SUCCESS) {
-                               int func_num = (rsp[1] >> 4) && 0x07;
+                               int func_num = (rsp[1] >> 4) & 0x07;
                                if (func_num) {
                                        RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
                                        chip->sd_io = 1;
index 6e99ec87fee0b26505bdb81fc1f9147c463c049f..8cbea42b69bc75a87a61a60652c80e389eefa14b 100644 (file)
@@ -26,6 +26,8 @@
 static int stub_probe(struct usb_interface *interface,
                      const struct usb_device_id *id);
 static void stub_disconnect(struct usb_interface *interface);
+static int stub_pre_reset(struct usb_interface *interface);
+static int stub_post_reset(struct usb_interface *interface);
 
 /*
  * Define device IDs here if you want to explicitly limit exportable devices.
@@ -59,6 +61,8 @@ struct usb_driver stub_driver = {
        .probe          = stub_probe,
        .disconnect     = stub_disconnect,
        .id_table       = stub_table,
+       .pre_reset      = stub_pre_reset,
+       .post_reset     = stub_post_reset,
 };
 
 /*
@@ -541,3 +545,20 @@ static void stub_disconnect(struct usb_interface *interface)
                del_match_busid((char *)udev_busid);
        }
 }
+
+/* 
+ * Presence of pre_reset and post_reset prevents the driver from being unbound
+ * when the device is being reset
+ */
+int stub_pre_reset(struct usb_interface *interface)
+{
+       dev_dbg(&interface->dev, "pre_reset\n");
+       return 0;
+}
+
+int stub_post_reset(struct usb_interface *interface)
+{
+       dev_dbg(&interface->dev, "post_reset\n");
+       return 0;
+}
index a5c1fa1f0430c7d5a5b4921a55a212f347ae52ca..bc57844600b96ede1e0e0ee9da32819df1fc43a8 100644 (file)
@@ -175,16 +175,18 @@ static int tweak_reset_device_cmd(struct urb *urb)
        dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
 
        /*
-        * usb_lock_device_for_reset caused a deadlock: it causes the driver
-        * to unbind. In the shutdown the rx thread is signalled to shut down
-        * but this thread is pending in the usb_lock_device_for_reset.
-        *
-        * Instead queue the reset.
-        *
-        * Unfortunatly an existing usbip connection will be dropped due to
-        * driver unbinding.
+        * With the implementation of pre_reset and post_reset the driver no 
+        * longer unbinds. This allows the use of synchronous reset.
         */
-       usb_queue_reset_device(sdev->interface);
+
+       if (usb_lock_device_for_reset(sdev->udev, sdev->interface)<0)
+       {
+               dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+               return 0;
+       }
+       usb_reset_device(sdev->udev);
+       usb_unlock_device(sdev->udev);
+
        return 0;
 }
 
index a4c42a75a3bfea0f0143e8dd70454fde5dddf524..09e8c7d53af3e7d72ebfe25b45100789f4f5185a 100644 (file)
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
        gsm->tty = NULL;
 }
 
-static unsigned int gsmld_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+                             char *fp, int count)
 {
        struct gsm_mux *gsm = tty->disc_data;
        const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
        }
        /* FASYNC if needed ? */
        /* If clogged call tty_throttle(tty); */
-
-       return count;
 }
 
 /**
index cac666314aef4a5f453c5031caaa5b8fac4f0508..cea56033b34c2b490e796ae75df0fbea4e550a3f 100644 (file)
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
                                    poll_table *wait);
 static int n_hdlc_tty_open(struct tty_struct *tty);
 static void n_hdlc_tty_close(struct tty_struct *tty);
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
-               const __u8 *cp, char *fp, int count);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+                              char *fp, int count);
 static void n_hdlc_tty_wakeup(struct tty_struct *tty);
 
 #define bset(p,b)      ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
  * Called by tty low level driver when receive data is available. Data is
  * interpreted as one HDLC frame.
  */
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
-               const __u8 *data, char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+                              char *flags, int count)
 {
        register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
        register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
                
        /* This can happen if stuff comes in on the backup tty */
        if (!n_hdlc || tty != n_hdlc->tty)
-               return -ENODEV;
+               return;
                
        /* verify line is using HDLC discipline */
        if (n_hdlc->magic != HDLC_MAGIC) {
                printk("%s(%d) line not using HDLC discipline\n",
                        __FILE__,__LINE__);
-               return -EINVAL;
+               return;
        }
        
        if ( count>maxframe ) {
                if (debuglevel >= DEBUG_LEVEL_INFO)     
                        printk("%s(%d) rx count>maxframesize, data discarded\n",
                               __FILE__,__LINE__);
-               return -EINVAL;
+               return;
        }
 
        /* get a free HDLC buffer */    
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
                if (debuglevel >= DEBUG_LEVEL_INFO)     
                        printk("%s(%d) no more rx buffers, data discarded\n",
                               __FILE__,__LINE__);
-               return -EINVAL;
+               return;
        }
                
        /* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
        if (n_hdlc->tty->fasync != NULL)
                kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
 
-       return count;
-
 }      /* end of n_hdlc_tty_receive() */
 
 /**
index a4bc39c21a436476b7ea8363f6c4d892882a5520..5c6c31459a2f6618cb7cf9d83c7100cf1a6d86ea 100644 (file)
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
 static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
 static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
                struct poll_table_struct *wait);
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+               char *fp, int count);
 
 static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
        .owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
        return result;
 }
 
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+                       char *fp, int count)
 {
        struct r3964_info *pInfo = tty->disc_data;
        const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
                }
 
        }
-
-       return count;
 }
 
 MODULE_LICENSE("GPL");
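
The gsm, n_hdlc and r3964 hunks above all make the same conversion: receive_buf() returns void because the caller now consults tty->receive_room before handing over data, so the discipline never reports a partial consumption. A minimal sketch of the resulting line-discipline shape (names are illustrative, not taken from this patch):

    static void example_receive_buf(struct tty_struct *tty,
                                    const unsigned char *cp, char *fp, int count)
    {
            /* count is already limited to tty->receive_room by the caller,
             * so everything passed in is consumed; nothing is returned. */
    }

    static struct tty_ldisc_ops example_ldisc_ops = {
            .owner          = THIS_MODULE,
            .name           = "example",
            .receive_buf    = example_receive_buf,
    };
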
index 95d0a9c2dd13e3e625d8b287d911d327cebe2ee4..0ad32888091c16c1c27de4c032517df583ea4c9f 100644 (file)
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
        return put_user(x, ptr);
 }
 
+/**
+ *     n_tty_set_room  -       receive space
+ *     @tty: terminal
+ *
+ *     Sets tty->receive_room to tell the driver how much data it is
+ *     permitted to feed to the line discipline without any being lost
+ *     and thus to manage flow control. Not serialized. Answers for the
+ *     "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+       /* tty->read_cnt is not read locked ? */
+       int     left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+       int old_left;
+
+       /*
+        * If we are doing input canonicalization, and there are no
+        * pending newlines, let characters through without limit, so
+        * that erase characters will be handled.  Other excess
+        * characters will be beeped.
+        */
+       if (left <= 0)
+               left = tty->icanon && !tty->canon_data;
+       old_left = tty->receive_room;
+       tty->receive_room = left;
+
+       /* Did this open up the receive buffer? We may need to flip */
+       if (left && !old_left)
+               schedule_work(&tty->buf.work);
+}
+
 static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
 {
        if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
 
        tty->canon_head = tty->canon_data = tty->erasing = 0;
        memset(&tty->read_flags, 0, sizeof tty->read_flags);
+       n_tty_set_room(tty);
        check_unthrottle(tty);
 }
 
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
  *     calls one at a time and in order (or using flush_to_ldisc)
  */
 
-static unsigned int n_tty_receive_buf(struct tty_struct *tty,
-               const unsigned char *cp, char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+                             char *fp, int count)
 {
        const unsigned char *p;
        char *f, flags = TTY_NORMAL;
        int     i;
        char    buf[64];
        unsigned long cpuflags;
-       int left;
-       int ret = 0;
 
        if (!tty->read_buf)
-               return 0;
+               return;
 
        if (tty->real_raw) {
                spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
                memcpy(tty->read_buf + tty->read_head, cp, i);
                tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
                tty->read_cnt += i;
-               ret += i;
                cp += i;
                count -= i;
 
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
                memcpy(tty->read_buf + tty->read_head, cp, i);
                tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
                tty->read_cnt += i;
-               ret += i;
                spin_unlock_irqrestore(&tty->read_lock, cpuflags);
        } else {
-               ret = count;
                for (i = count, p = cp, f = fp; i; i--, p++) {
                        if (f)
                                flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
                        tty->ops->flush_chars(tty);
        }
 
+       n_tty_set_room(tty);
+
        if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
                L_EXTPROC(tty)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
         * mode.  We don't want to throttle the driver if we're in
         * canonical mode and don't have a newline yet!
         */
-       left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
-       if (left < TTY_THRESHOLD_THROTTLE)
+       if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
                tty_throttle(tty);
-
-       return ret;
 }
 
 int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
        if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
                tty->raw = 1;
                tty->real_raw = 1;
+               n_tty_set_room(tty);
                return;
        }
        if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                else
                        tty->real_raw = 0;
        }
+       n_tty_set_room(tty);
        /* The termios change make the tty ready for I/O */
        wake_up_interruptible(&tty->write_wait);
        wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
                                retval = -ERESTARTSYS;
                                break;
                        }
+                       /* FIXME: does n_tty_set_room need locking ? */
+                       n_tty_set_room(tty);
                        timeout = schedule_timeout(timeout);
                        continue;
                }
@@ -1855,8 +1885,10 @@ do_it_again:
                 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
                 * we won't get any more characters.
                 */
-               if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+               if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+                       n_tty_set_room(tty);
                        check_unthrottle(tty);
+               }
 
                if (b - buf >= minimum)
                        break;
@@ -1878,6 +1910,7 @@ do_it_again:
        } else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
                 goto do_it_again;
 
+       n_tty_set_room(tty);
        return retval;
 }
 
index f2cb7503fcb213115b3b15722495c335a0aad984..465210930890b72ecdcaf1c70167b7211f43ed9e 100644 (file)
@@ -1397,6 +1397,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
        int fifosize, base_baud;
        int port_type;
        struct pch_uart_driver_data *board;
+       const char *board_name;
 
        board = &drv_dat[id->driver_data];
        port_type = board->port_type;
@@ -1412,7 +1413,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
        base_baud = 1843200; /* 1.8432MHz */
 
        /* quirk for CM-iTC board */
-       if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC"))
+       board_name = dmi_get_system_info(DMI_BOARD_NAME);
+       if (board_name && strstr(board_name, "CM-iTC"))
                base_baud = 192000000; /* 192.0MHz */
 
        switch (port_type) {
index 46de2e075dacda019589cc5462a0d60f6eea8d51..6c9b7cd6778a9613542d739358f309a38fa56070 100644 (file)
@@ -413,10 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
        spin_lock_irqsave(&tty->buf.lock, flags);
 
        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
-               struct tty_buffer *head, *tail = tty->buf.tail;
-               int seen_tail = 0;
+               struct tty_buffer *head;
                while ((head = tty->buf.head) != NULL) {
-                       int copied;
                        int count;
                        char *char_buf;
                        unsigned char *flag_buf;
@@ -425,15 +423,6 @@ static void flush_to_ldisc(struct work_struct *work)
                        if (!count) {
                                if (head->next == NULL)
                                        break;
-                               /*
-                                 There's a possibility tty might get new buffer
-                                 added during the unlock window below. We could
-                                 end up spinning in here forever hogging the CPU
-                                 completely. To avoid this let's have a rest each
-                                 time we processed the tail buffer.
-                               */
-                               if (tail == head)
-                                       seen_tail = 1;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
@@ -443,19 +432,17 @@ static void flush_to_ldisc(struct work_struct *work)
                           line discipline as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
+                       if (!tty->receive_room)
+                               break;
+                       if (count > tty->receive_room)
+                               count = tty->receive_room;
                        char_buf = head->char_buf_ptr + head->read;
                        flag_buf = head->flag_buf_ptr + head->read;
+                       head->read += count;
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
-                       copied = disc->ops->receive_buf(tty, char_buf,
+                       disc->ops->receive_buf(tty, char_buf,
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);
-
-                       head->read += copied;
-
-                       if (copied == 0 || seen_tail) {
-                               schedule_work(&tty->buf.work);
-                               break;
-                       }
                }
                clear_bit(TTY_FLUSHING, &tty->flags);
        }
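
The caller side of that contract is what the flush_to_ldisc() hunk above implements: stop when receive_room is zero and never pass more than the discipline advertised. Code that calls receive_buf() directly follows the same idiom, roughly (a sketch, mirroring the selection.c hunk below rather than quoting it):

    static void push_to_ldisc(struct tty_struct *tty,
                              const unsigned char *buf, int count)
    {
            if (count > tty->receive_room)          /* clamp to advertised room */
                    count = tty->receive_room;
            if (count)
                    tty->ldisc->ops->receive_buf(tty, buf, NULL, count);
    }
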
index 67b1d0d7c8acb924fdc481cfc75b36fa6c925de8..fb864e7fcd13f684e30108a02189f6b99c9315fc 100644 (file)
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
                        continue;
                }
                count = sel_buffer_lth - pasted;
-               count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+               count = min(count, tty->receive_room);
+               tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
                                                                NULL, count);
                pasted += count;
        }
index 395a347f2ebbea52d86b20cb97bec70548b6a34a..dac7676ce21bb6d9121d8fa4d178ea5cc0708438 100644 (file)
@@ -1530,6 +1530,8 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
        { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
        { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
+       { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
        { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
index 79a58c3a2e2a3a254b48a9f2fb7ac78c5c3bd27e..90ae1753dda16bab13f0cebb759d961fb5598cbc 100644 (file)
@@ -339,7 +339,8 @@ static int get_hub_status(struct usb_device *hdev,
 {
        int i, status = -ETIMEDOUT;
 
-       for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
+       for (i = 0; i < USB_STS_RETRIES &&
+                       (status == -ETIMEDOUT || status == -EPIPE); i++) {
                status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
                        USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
                        data, sizeof(*data), USB_STS_TIMEOUT);
@@ -355,7 +356,8 @@ static int get_port_status(struct usb_device *hdev, int port1,
 {
        int i, status = -ETIMEDOUT;
 
-       for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
+       for (i = 0; i < USB_STS_RETRIES &&
+                       (status == -ETIMEDOUT || status == -EPIPE); i++) {
                status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
                        USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
                        data, sizeof(*data), USB_STS_TIMEOUT);
index 1b125c224dcf8d4d73f5a094edb6f4417c8e88e4..2278dad886e2e8b028b2809eb39ccba8c09dff46 100644 (file)
@@ -389,7 +389,6 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
        mutex_unlock(&inode->i_mutex);
        if (!error)
                d_delete(dentry);
-       dput(dentry);
        return error;
 }
 
index 58456d1aec21611dcb395ef343ce80e4c9bc315e..029e288805b6c1ab1eeb99d6235093b0b4ee1058 100644 (file)
@@ -632,13 +632,10 @@ config USB_DUMMY_HCD
 
 endchoice
 
+# Selected by UDC drivers that support high-speed operation.
 config USB_GADGET_DUALSPEED
        bool
        depends on USB_GADGET
-       default n
-       help
-         Means that gadget drivers should include extra descriptors
-         and code to handle dual-speed controllers.
 
 #
 # USB Gadget Drivers
index 6e42aab75806e12c58e7b572e1dc953a13a32fa0..95e8138cd48fd9ff9be91f170777f562d3c88ede 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <asm/system.h>
index 41dc093c0a1b943fa44a69629dbd19e8cc5f098c..f4690ffcb4890af84e0ba88a82dd929e3e2fab82 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/clk.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <mach/hardware.h>
index 61ff927928ab9620231eddce21056a4160488e5f..d3dcabc1a5fca0e7ad221236285d4064b69abd0d 100644 (file)
@@ -1906,6 +1906,7 @@ static int dummy_hcd_probe(struct platform_device *pdev)
        if (!hcd)
                return -ENOMEM;
        the_controller = hcd_to_dummy (hcd);
+       hcd->has_tt = 1;
 
        retval = usb_add_hcd(hcd, 0, 0);
        if (retval != 0) {
index a01383f71f38639c5539f5b82431ef1186ee6b60..a56876aaf76cdc1dfbec0a331f70739a2a27b04f 100644 (file)
@@ -431,8 +431,10 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 
        /* halt any endpoint by doing a "wrong direction" i/o call */
        if (!usb_endpoint_dir_in(&data->desc)) {
-               if (usb_endpoint_xfer_isoc(&data->desc))
+               if (usb_endpoint_xfer_isoc(&data->desc)) {
+                       mutex_unlock(&data->lock);
                        return -EINVAL;
+               }
                DBG (data->dev, "%s halt\n", data->name);
                spin_lock_irq (&data->dev->lock);
                if (likely (data->ep != NULL))
index b62b2640deb0b7c9e6a0f9cf81f2c8242fdf37d1..b1a8146b9d50d4180ca762e9ab47c7f41899d736 100644 (file)
@@ -2083,7 +2083,7 @@ out:
 }
 
 #ifdef CONFIG_PM
-static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
+static int mv_udc_suspend(struct device *_dev)
 {
        struct mv_udc *udc = the_controller;
 
@@ -2092,7 +2092,7 @@ static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
        return 0;
 }
 
-static int mv_udc_resume(struct platform_device *_dev)
+static int mv_udc_resume(struct device *_dev)
 {
        struct mv_udc *udc = the_controller;
        int retval;
@@ -2100,7 +2100,7 @@ static int mv_udc_resume(struct platform_device *_dev)
        retval = mv_udc_phy_init(udc->phy_regs);
        if (retval) {
                dev_err(_dev, "phy initialization error %d\n", retval);
-               goto error;
+               return retval;
        }
        udc_reset(udc);
        ep0_reset(udc);
@@ -2122,7 +2122,7 @@ static struct platform_driver udc_driver = {
                .owner  = THIS_MODULE,
                .name   = "pxa-u2o",
 #ifdef CONFIG_PM
-               .pm     = mv_udc_pm_ops,
+               .pm     = &mv_udc_pm_ops,
 #endif
        },
 };
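
Changing the suspend/resume prototypes to take a struct device means they are now invoked through a dev_pm_ops table rather than the legacy platform_driver hooks, which matches the ".pm = &mv_udc_pm_ops" line above. The table definition is outside this hunk; one plausible form (assumed, not shown in the patch) is:

    static const struct dev_pm_ops mv_udc_pm_ops = {
            .suspend        = mv_udc_suspend,
            .resume         = mv_udc_resume,
    };
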
index 24696f7fa6a9a7a412c229f8667edd1895e1226d..476d88e1ae97daaaf67dd78b5e8565b9ff7d7e9e 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/device.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
index 365c02fc25fcf0f89668b79ce2a72933ada22a40..774545494cf222ed8515474f48d5d346317c5619 100644 (file)
@@ -2216,7 +2216,6 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
                if (retval != 0) {
                        pr_err("%s: can't get irq %i, err %d\n",
                                driver_name, LUBBOCK_USB_DISC_IRQ, retval);
-lubbock_fail0:
                        goto err_irq_lub;
                }
                retval = request_irq(LUBBOCK_USB_IRQ,
@@ -2226,7 +2225,6 @@ lubbock_fail0:
                if (retval != 0) {
                        pr_err("%s: can't get irq %i, err %d\n",
                                driver_name, LUBBOCK_USB_IRQ, retval);
-                       free_irq(LUBBOCK_USB_DISC_IRQ, dev);
                        goto lubbock_fail0;
                }
        } else
@@ -2236,10 +2234,11 @@ lubbock_fail0:
        return 0;
 
 #ifdef CONFIG_ARCH_LUBBOCK
+lubbock_fail0:
        free_irq(LUBBOCK_USB_DISC_IRQ, dev);
  err_irq_lub:
-#endif
        free_irq(irq, dev);
+#endif
  err_irq1:
        if (gpio_is_valid(dev->mach->gpio_pullup))
                gpio_free(dev->mach->gpio_pullup);
index acb9cc418df9a59b4db4ee57bdcbe68144475d6e..0dfee282878a86ef70fdd9b03bb09c1cf9823108 100644 (file)
@@ -2680,9 +2680,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 
        writel(0, hsotg->regs + S3C_DAINTMSK);
 
-       dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
-                readl(hsotg->regs + S3C_DIEPCTL0),
-                readl(hsotg->regs + S3C_DOEPCTL0));
+       dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+               readl(hsotg->regs + S3C_DIEPCTL0),
+               readl(hsotg->regs + S3C_DOEPCTL0));
 
        /* enable in and out endpoint interrupts */
        s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
@@ -2701,7 +2701,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
        udelay(10);  /* see openiboot */
        __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
 
-       dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+       dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
 
        /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
           writing to the EPCTL register.. */
@@ -2721,9 +2721,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 
        s3c_hsotg_enqueue_setup(hsotg);
 
-       dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
-                readl(hsotg->regs + S3C_DIEPCTL0),
-                readl(hsotg->regs + S3C_DOEPCTL0));
+       dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+               readl(hsotg->regs + S3C_DIEPCTL0),
+               readl(hsotg->regs + S3C_DOEPCTL0));
 
        /* clear global NAKs */
        writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
@@ -2921,9 +2921,9 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
 
        /* setup fifos */
 
-       dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
-                readl(hsotg->regs + S3C_GRXFSIZ),
-                readl(hsotg->regs + S3C_GNPTXFSIZ));
+       dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+               readl(hsotg->regs + S3C_GRXFSIZ),
+               readl(hsotg->regs + S3C_GNPTXFSIZ));
 
        s3c_hsotg_init_fifo(hsotg);
 
@@ -2945,6 +2945,7 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
 
 static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
 {
+#ifdef DEBUG
        struct device *dev = hsotg->dev;
        void __iomem *regs = hsotg->regs;
        u32 val;
@@ -2987,6 +2988,7 @@ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
 
        dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
                 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+#endif
 }
 
 
index cfe3cf56d6bd5e531682f928b02023b6c863660e..d5e3e1e586265c093a1002664af6f55cebb883bd 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/prefetch.h>
 
 #include <mach/regs-s3c2443-clock.h>
 #include <plat/udc.h>
@@ -1301,7 +1302,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
        hsudc->uclk = clk_get(&pdev->dev, "usb-device");
        if (IS_ERR(hsudc->uclk)) {
                dev_err(dev, "failed to find usb-device clock source\n");
-               return PTR_ERR(hsudc->uclk);
+               ret = PTR_ERR(hsudc->uclk);
+               goto err_clk;
        }
        clk_enable(hsudc->uclk);
 
@@ -1310,7 +1312,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
        disable_irq(hsudc->irq);
        local_irq_enable();
        return 0;
-
+err_clk:
+       free_irq(hsudc->irq, hsudc);
 err_irq:
        iounmap(hsudc->regs);
 
index 6d8b04061d5d6772f076c9792315a8ee8c42499d..100f2635cf0a1d2ee3ef0a175c685e6f35a2b213 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/prefetch.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index afef7b0a419567ab92643d0809a1da5f1a4f13a9..80be5472783a4f30e8a791a205958f176ff2dbf1 100644 (file)
@@ -312,8 +312,10 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
                return PTR_ERR(usb_clk);
 
        hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
-       if (!hcd)
-               return -ENOMEM;
+       if (!hcd) {
+               retval = -ENOMEM;
+               goto err0;
+       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -368,6 +370,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
  err1:
        usb_put_hcd(hcd);
+ err0:
        clk_put(usb_clk);
        return retval;
 }
index 2e0486178dbe4c45dd984eab0663bd3a2d97f392..1f50b4468e87bfe90f7b14b1327187a7395609f3 100644 (file)
@@ -438,13 +438,13 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
        struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
 
        switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
-       case 0:
+       case SLOT_STATE_ENABLED:
                return "enabled/disabled";
-       case 1:
+       case SLOT_STATE_DEFAULT:
                return "default";
-       case 2:
+       case SLOT_STATE_ADDRESSED:
                return "addressed";
-       case 3:
+       case SLOT_STATE_CONFIGURED:
                return "configured";
        default:
                return "reserved";
index 26caba4c1950a04dc71488d33fcca3328b7cb391..0f8e1d29a858e6c5e00e287a11f8ff1ba2b212e1 100644 (file)
@@ -985,9 +985,19 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
        interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
        if (interval != ep->desc.bInterval - 1)
                dev_warn(&udev->dev,
-                        "ep %#x - rounding interval to %d microframes\n",
+                        "ep %#x - rounding interval to %d %sframes\n",
                         ep->desc.bEndpointAddress,
-                        1 << interval);
+                        1 << interval,
+                        udev->speed == USB_SPEED_FULL ? "" : "micro");
+
+       if (udev->speed == USB_SPEED_FULL) {
+               /*
+                * Full speed isoc endpoints specify interval in frames,
+                * not microframes. We are using microframes everywhere,
+                * so adjust accordingly.
+                */
+               interval += 3;  /* 1 frame = 2^3 uframes */
+       }
 
        return interval;
 }
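
Worked example of the full-speed adjustment above: an isochronous endpoint with bInterval = 4 asks for a period of 2^(4-1) = 8 frames. The code computes interval = 3, then adds 3 because one frame equals 2^3 microframes, giving 2^6 = 64 microframes, i.e. the same 8 frames expressed in the microframe units xHCI expects.
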
index c408e9f6a707a51f396b94554e3d07970d29fe84..17541d09eabbc9b867c5be1dcdeba349b45bfa82 100644 (file)
@@ -106,12 +106,22 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 
        /* Look for vendor-specific quirks */
        if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
-                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
-                       pdev->revision == 0x0) {
+                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
+               if (pdev->revision == 0x0) {
                        xhci->quirks |= XHCI_RESET_EP_QUIRK;
                        xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
                                        " endpoint cmd after reset endpoint\n");
+               }
+               /* Fresco Logic confirms: all revisions of this chip do not
+                * support MSI, even though some of them claim to in their PCI
+                * capabilities.
+                */
+               xhci->quirks |= XHCI_BROKEN_MSI;
+               xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
+                               "has broken MSI implementation\n",
+                               pdev->revision);
        }
+
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
index cc1485bfed385dc7df6b51a7de70500e453776ef..800f417c730900271a98ccf0410d7617afc3aa20 100644 (file)
@@ -1782,7 +1782,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        struct usb_iso_packet_descriptor *frame;
        int idx;
 
-       ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
        urb_priv = td->urb->hcpriv;
        idx = urb_priv->td_cnt;
        frame = &td->urb->iso_frame_desc[idx];
index d9660eb97eb9a4ede02ff49111b46d71d4ac89bb..06e7023258d0f4e3e4b603d6c2ae68b1fbd38f4f 100644 (file)
@@ -430,12 +430,19 @@ int xhci_run(struct usb_hcd *hcd)
                free_irq(hcd->irq, hcd);
        hcd->irq = -1;
 
+       /* Some Fresco Logic host controllers advertise MSI, but fail to
+        * generate interrupts.  Don't even try to enable MSI.
+        */
+       if (xhci->quirks & XHCI_BROKEN_MSI)
+               goto legacy_irq;
+
        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to msi*/
                ret = xhci_setup_msi(xhci);
 
        if (ret) {
+legacy_irq:
                /* fall back to legacy interrupt*/
                ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                                        hcd->irq_descr, hcd);
@@ -1849,8 +1856,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
        /* Free any rings that were dropped, but not changed. */
        for (i = 1; i < 31; ++i) {
-               if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
-                               !(ctrl_ctx->add_flags & (1 << (i + 1))))
+               if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+                   !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
        }
        xhci_zero_in_ctx(xhci, virt_dev);
@@ -2467,6 +2474,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_command *reset_device_cmd;
        int timeleft;
        int last_freed_endpoint;
+       struct xhci_slot_ctx *slot_ctx;
 
        ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
        if (ret <= 0)
@@ -2499,6 +2507,12 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
                        return -EINVAL;
        }
 
+       /* If device is not setup, there is no point in resetting it */
+       slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+       if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+                                               SLOT_STATE_DISABLED)
+               return 0;
+
        xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
        /* Allocate the command structure that holds the struct completion.
         * Assume we're in process context, since the normal device reset
index ac0196e7fcf11dba0c1df87ffb24ea57cd6c379f..7d1ea3bf5e1fa0187210f6c89ad87a9f53f1f8e2 100644 (file)
@@ -560,6 +560,11 @@ struct xhci_slot_ctx {
 #define SLOT_STATE     (0x1f << 27)
 #define GET_SLOT_STATE(p)      (((p) & (0x1f << 27)) >> 27)
 
+#define SLOT_STATE_DISABLED    0
+#define SLOT_STATE_ENABLED     SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT     1
+#define SLOT_STATE_ADDRESSED   2
+#define SLOT_STATE_CONFIGURED  3
 
 /**
  * struct xhci_ep_ctx
@@ -1302,6 +1307,7 @@ struct xhci_hcd {
  * commands.
  */
 #define XHCI_EP_LIMIT_QUIRK    (1 << 5)
+#define XHCI_BROKEN_MSI                (1 << 6)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index ab8e1001e5e288ae9fd06417567cc62b4d4897f5..c71b0372786e00482bf2f9fadf4bf6293721d27f 100644 (file)
@@ -96,6 +96,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/kobject.h>
+#include <linux/prefetch.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
index 206cfabc92863e0d409e8c76a32bf3129883eeda..547486ccd0592d21a2b2a5ca8bf4480192ffaf73 100644 (file)
@@ -1380,5 +1380,6 @@ void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 
+       kfree(gpriv->uep);
        kfree(gpriv);
 }
index e8dbde55f6c51553216b53b7d0c973ac2059c41e..1627289775538eaa2b1967e2547c54717c82baa3 100644 (file)
@@ -647,6 +647,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
        { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
index 1d946cd238ba2989da8bb197a3b0eb8c44f68570..ab1fcdf3c378e954882a8a7db9bc1e4cb672aed8 100644 (file)
  */
 #define FTDI_4N_GALAXY_DE_1_PID        0xF3C0
 #define FTDI_4N_GALAXY_DE_2_PID        0xF3C1
+#define FTDI_4N_GALAXY_DE_3_PID        0xF3C2
 
 /*
  * Linx Technologies product ids
index 318dd00040a3fdb28fbce657d7a3880d12ba505d..60b25d8ea0e2c14376ee4fad71d21a680f0502f7 100644 (file)
@@ -311,10 +311,6 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_AC2726                     0xfff5
 #define ZTE_PRODUCT_AC8710T                    0xffff
 
-/* ZTE PRODUCTS -- alternate vendor ID */
-#define ZTE_VENDOR_ID2                         0x1d6b
-#define ZTE_PRODUCT_MF_330                     0x0002
-
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
 
@@ -340,11 +336,12 @@ static void option_instat_callback(struct urb *urb);
 #define TOSHIBA_PRODUCT_G450                   0x0d45
 
 #define ALINK_VENDOR_ID                                0x1e0e
+#define ALINK_PRODUCT_PH300                    0x9100
 #define ALINK_PRODUCT_3GU                      0x9200
 
 /* ALCATEL PRODUCTS */
 #define ALCATEL_VENDOR_ID                      0x1bbb
-#define ALCATEL_PRODUCT_X060S                  0x0000
+#define ALCATEL_PRODUCT_X060S_X200             0x0000
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -379,6 +376,9 @@ static void option_instat_callback(struct urb *urb);
  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14             0x9603
 
+/* Zoom */
+#define ZOOM_PRODUCT_4597                      0x9607
+
 /* Haier products */
 #define HAIER_VENDOR_ID                                0x201e
 #define HAIER_PRODUCT_CE100                    0x2009
@@ -432,6 +432,20 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
        .reason = OPTION_BLACKLIST_SENDSETUP
 };
 
+static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info alcatel_x200_blacklist = {
+       .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
+       .ifaceinfo = alcatel_x200_no_sendsetup,
+       .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
+static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
+static const struct option_blacklist_info zte_k3765_z_blacklist = {
+       .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
+       .ifaceinfo = zte_k3765_z_no_sendsetup,
+       .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -916,13 +930,13 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+         0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
-       { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) },
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -935,13 +949,17 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
        { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
        { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+       { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
-       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+         .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+       },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
          .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
        },
+       { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
        /* Pirelli  */
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
index 00418995d8e9e549c835ec209e1f1e5dbf7e5076..e8ae21b2d387c11a1a1038ae98855d9db1228dc1 100644 (file)
@@ -819,6 +819,35 @@ Retry_Sense:
                }
        }
 
+       /*
+        * Some devices don't work or return incorrect data the first
+        * time they get a READ(10) command, or for the first READ(10)
+        * after a media change.  If the INITIAL_READ10 flag is set,
+        * keep track of whether READ(10) commands succeed.  If the
+        * previous one succeeded and this one failed, set the REDO_READ10
+        * flag to force a retry.
+        */
+       if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
+                       srb->cmnd[0] == READ_10)) {
+               if (srb->result == SAM_STAT_GOOD) {
+                       set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
+               } else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
+                       clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
+                       set_bit(US_FLIDX_REDO_READ10, &us->dflags);
+               }
+
+               /*
+                * Next, if the REDO_READ10 flag is set, return a result
+                * code that will cause the SCSI core to retry the READ(10)
+                * command immediately.
+                */
+               if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
+                       clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
+                       srb->result = DID_IMM_RETRY << 16;
+                       srb->sense_buffer[0] = 0;
+               }
+       }
+
        /* Did we transfer less than the minimum amount required? */
        if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
                        scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
index c1602b8c55949e0b5c1e4a99042d1409f953008f..ccff3483eebc87d0c82a562cbffa3f5ab36ab878 100644 (file)
@@ -1114,6 +1114,16 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_FIX_CAPACITY ),
 
+/* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
+ * This card reader returns "Illegal Request, Logical Block Address
+ * Out of Range" for the first READ(10) after a new card is inserted.
+ */
+UNUSUAL_DEV(  0x090c, 0x6000, 0x0100, 0x0100,
+               "Feiya",
+               "SD/SDHC Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_INITIAL_READ10 ),
+
 /* This Pentax still camera is not conformant
  * to the USB storage specification: -
  * - It does not like the INQUIRY command. So we must handle this command
@@ -1888,6 +1898,15 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/* Reported by Sven Geggus <sven-usbst@geggus.net>
+ * This encrypted pen drive returns bogus data for the initial READ(10).
+ */
+UNUSUAL_DEV(  0x1b1c, 0x1ab5, 0x0200, 0x0200,
+               "Corsair",
+               "Padlock v2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_INITIAL_READ10 ),
+
 /* Patch by Richard Schütz <r.schtz@t-online.de>
  * This external hard drive enclosure uses a JMicron chip which
  * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
index 5ee7ac42e08f3bddb892109d07e99d6a216e2509..0ca095820f3e7ca10c98e27a4eaeea728e5c533e 100644 (file)
@@ -440,7 +440,8 @@ static void adjust_quirks(struct us_data *us)
                        US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
                        US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
                        US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
-                       US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16);
+                       US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+                       US_FL_INITIAL_READ10);
 
        p = quirks;
        while (*p) {
@@ -490,6 +491,9 @@ static void adjust_quirks(struct us_data *us)
                case 'm':
                        f |= US_FL_MAX_SECTORS_64;
                        break;
+               case 'n':
+                       f |= US_FL_INITIAL_READ10;
+                       break;
                case 'o':
                        f |= US_FL_CAPACITY_OK;
                        break;
@@ -953,6 +957,13 @@ int usb_stor_probe2(struct us_data *us)
        if (result)
                goto BadDevice;
 
+       /*
+        * If the device returns invalid data for the first READ(10)
+        * command, indicate the command should be retried.
+        */
+       if (us->fflags & US_FL_INITIAL_READ10)
+               set_bit(US_FLIDX_REDO_READ10, &us->dflags);
+
        /* Acquire all the other resources and add the host */
        result = usb_stor_acquire_resources(us);
        if (result)
index 89d3bfff98df47d34b78b516aeaa19b101d572b5..7b0f2113632efb52ada464fc17dd83a74f34ea5a 100644 (file)
@@ -73,6 +73,8 @@ struct us_unusual_dev {
 #define US_FLIDX_RESETTING     4       /* device reset in progress */
 #define US_FLIDX_TIMED_OUT     5       /* SCSI midlayer timed out  */
 #define US_FLIDX_DONT_SCAN     6       /* don't scan (disconnect)  */
+#define US_FLIDX_REDO_READ10   7       /* redo READ(10) command    */
+#define US_FLIDX_READ10_WORKED 8       /* previous READ(10) succeeded */
 
 #define USB_STOR_STRING_LEN 32
 
index 3ec4923c2d84e875eb27bc1119c1765ea7e73989..c22e8d39a2cb1371ff88bc6840cacbbbc8123c37 100644 (file)
@@ -515,11 +515,10 @@ static int __devinit arcfb_probe(struct platform_device *dev)
 
        /* We need a flat backing store for the Arc's
           less-flat actual paged framebuffer */
-       if (!(videomemory = vmalloc(videomemorysize)))
+       videomemory = vzalloc(videomemorysize);
+       if (!videomemory)
                return retval;
 
-       memset(videomemory, 0, videomemorysize);
-
        info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
        if (!info)
                goto err;
index ebb893c49e9027b9d9cb3aa897e83db394026ff0..d7aaec5667bfd6ca1e1407e52c8c503577339405 100644 (file)
@@ -248,10 +248,6 @@ static int atyfb_sync(struct fb_info *info);
 
 static int aty_init(struct fb_info *info);
 
-#ifdef CONFIG_ATARI
-static int store_video_par(char *videopar, unsigned char m64_num);
-#endif
-
 static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
 
 static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -2268,11 +2264,13 @@ error:
        return;
 }
 
+#ifdef CONFIG_PCI
 static void aty_bl_exit(struct backlight_device *bd)
 {
        backlight_device_unregister(bd);
        printk("aty: Backlight unloaded\n");
 }
+#endif /* CONFIG_PCI */
 
 #endif /* CONFIG_FB_ATY_BACKLIGHT */
 
@@ -2789,7 +2787,7 @@ aty_init_exit:
        return ret;
 }
 
-#ifdef CONFIG_ATARI
+#if defined(CONFIG_ATARI) && !defined(MODULE)
 static int __devinit store_video_par(char *video_str, unsigned char m64_num)
 {
        char *p;
@@ -2818,7 +2816,7 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
        phys_vmembase[m64_num] = 0;
        return -1;
 }
-#endif /* CONFIG_ATARI */
+#endif /* CONFIG_ATARI && !MODULE */
 
 /*
  * Blank the display.
index 0c9373bedd1f39d61d8b391bcc54a630d6d981c8..2d93c8d61ad5e2aa9ea2cd48d30581aad7cf8fb4 100644 (file)
@@ -302,6 +302,18 @@ config BACKLIGHT_ADP8860
          To compile this driver as a module, choose M here: the module will
          be called adp8860_bl.
 
+config BACKLIGHT_ADP8870
+       tristate "Backlight Driver for ADP8870 using WLED"
+       depends on BACKLIGHT_CLASS_DEVICE && I2C
+       select NEW_LEDS
+       select LEDS_CLASS
+       help
+         If you have a LCD backlight connected to the ADP8870,
+         say Y here to enable this driver.
+
+         To compile this driver as a module, choose M here: the module will
+         be called adp8870_bl.
+
 config BACKLIGHT_88PM860X
        tristate "Backlight Driver for 88PM8606 using WLED"
        depends on MFD_88PM860X
index b9ca8490df87850dc746e5a4661b67f5294589a4..ee72adb8786ed2f9f71de431987f0f5e11da7fc5 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_WM831X)        += wm831x_bl.o
 obj-$(CONFIG_BACKLIGHT_ADX)    += adx_bl.o
 obj-$(CONFIG_BACKLIGHT_ADP5520)        += adp5520_bl.o
 obj-$(CONFIG_BACKLIGHT_ADP8860)        += adp8860_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8870)        += adp8870_bl.o
 obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
 obj-$(CONFIG_BACKLIGHT_PCF50633)       += pcf50633-backlight.o
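
Build note: the Kconfig and Makefile hunks above are what make the new driver selectable; enabling it as a module would look like the following .config fragment (illustrative, with the dependencies the Kconfig text names):

    CONFIG_I2C=y
    CONFIG_BACKLIGHT_CLASS_DEVICE=y
    CONFIG_BACKLIGHT_ADP8870=m
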
 
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
new file mode 100644 (file)
index 0000000..05a8832
--- /dev/null
@@ -0,0 +1,1012 @@
+/*
+ * Backlight driver for Analog Devices ADP8870 Backlight Devices
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/i2c/adp8870.h>
+#define ADP8870_EXT_FEATURES
+#define ADP8870_USE_LEDS
+
+
+#define ADP8870_MFDVID 0x00  /* Manufacturer and device ID */
+#define ADP8870_MDCR   0x01  /* Device mode and status */
+#define ADP8870_INT_STAT 0x02  /* Interrupts status */
+#define ADP8870_INT_EN 0x03  /* Interrupts enable */
+#define ADP8870_CFGR   0x04  /* Configuration register */
+#define ADP8870_BLSEL  0x05  /* Sink enable backlight or independent */
+#define ADP8870_PWMLED 0x06  /* PWM Enable Selection Register */
+#define ADP8870_BLOFF  0x07  /* Backlight off timeout */
+#define ADP8870_BLDIM  0x08  /* Backlight dim timeout */
+#define ADP8870_BLFR   0x09  /* Backlight fade in and out rates */
+#define ADP8870_BLMX1  0x0A  /* Backlight (Brightness Level 1-daylight) maximum current */
+#define ADP8870_BLDM1  0x0B  /* Backlight (Brightness Level 1-daylight) dim current */
+#define ADP8870_BLMX2  0x0C  /* Backlight (Brightness Level 2-bright) maximum current */
+#define ADP8870_BLDM2  0x0D  /* Backlight (Brightness Level 2-bright) dim current */
+#define ADP8870_BLMX3  0x0E  /* Backlight (Brightness Level 3-office) maximum current */
+#define ADP8870_BLDM3  0x0F  /* Backlight (Brightness Level 3-office) dim current */
+#define ADP8870_BLMX4  0x10  /* Backlight (Brightness Level 4-indoor) maximum current */
+#define ADP8870_BLDM4  0x11  /* Backlight (Brightness Level 4-indoor) dim current */
+#define ADP8870_BLMX5  0x12  /* Backlight (Brightness Level 5-dark) maximum current */
+#define ADP8870_BLDM5  0x13  /* Backlight (Brightness Level 5-dark) dim current */
+#define ADP8870_ISCLAW 0x1A  /* Independent sink current fade law register */
+#define ADP8870_ISCC   0x1B  /* Independent sink current control register */
+#define ADP8870_ISCT1  0x1C  /* Independent Sink Current Timer Register LED[7:5] */
+#define ADP8870_ISCT2  0x1D  /* Independent Sink Current Timer Register LED[4:1] */
+#define ADP8870_ISCF   0x1E  /* Independent sink current fade register */
+#define ADP8870_ISC1   0x1F  /* Independent Sink Current LED1 */
+#define ADP8870_ISC2   0x20  /* Independent Sink Current LED2 */
+#define ADP8870_ISC3   0x21  /* Independent Sink Current LED3 */
+#define ADP8870_ISC4   0x22  /* Independent Sink Current LED4 */
+#define ADP8870_ISC5   0x23  /* Independent Sink Current LED5 */
+#define ADP8870_ISC6   0x24  /* Independent Sink Current LED6 */
+#define ADP8870_ISC7   0x25  /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
+#define ADP8870_ISC7_L2        0x26  /* Independent Sink Current LED7 (Brightness Level 2-bright) */
+#define ADP8870_ISC7_L3        0x27  /* Independent Sink Current LED7 (Brightness Level 3-office) */
+#define ADP8870_ISC7_L4        0x28  /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
+#define ADP8870_ISC7_L5        0x29  /* Independent Sink Current LED7 (Brightness Level 5-dark) */
+#define ADP8870_CMP_CTL        0x2D  /* ALS Comparator Control Register */
+#define ADP8870_ALS1_EN        0x2E  /* Main ALS comparator level enable */
+#define ADP8870_ALS2_EN        0x2F  /* Second ALS comparator level enable */
+#define ADP8870_ALS1_STAT 0x30  /* Main ALS Comparator Status Register */
+#define ADP8870_ALS2_STAT 0x31  /* Second ALS Comparator Status Register */
+#define ADP8870_L2TRP  0x32  /* L2 comparator reference */
+#define ADP8870_L2HYS  0x33  /* L2 hysteresis */
+#define ADP8870_L3TRP  0x34  /* L3 comparator reference */
+#define ADP8870_L3HYS  0x35  /* L3 hysteresis */
+#define ADP8870_L4TRP  0x36  /* L4 comparator reference */
+#define ADP8870_L4HYS  0x37  /* L4 hysteresis */
+#define ADP8870_L5TRP  0x38  /* L5 comparator reference */
+#define ADP8870_L5HYS  0x39  /* L5 hysteresis */
+#define ADP8870_PH1LEVL        0x40  /* First phototransistor ambient light level-low byte register */
+#define ADP8870_PH1LEVH        0x41  /* First phototransistor ambient light level-high byte register */
+#define ADP8870_PH2LEVL        0x42  /* Second phototransistor ambient light level-low byte register */
+#define ADP8870_PH2LEVH        0x43  /* Second phototransistor ambient light level-high byte register */
+
+#define ADP8870_MANUFID                0x3  /* Analog Devices ADP8870 Manufacturer and device ID */
+#define ADP8870_DEVID(x)       ((x) & 0xF)
+#define ADP8870_MANID(x)       ((x) >> 4)
+
+/* MDCR Device mode and status */
+#define D7ALSEN                        (1 << 7)
+#define INT_CFG                        (1 << 6)
+#define NSTBY                  (1 << 5)
+#define DIM_EN                 (1 << 4)
+#define GDWN_DIS               (1 << 3)
+#define SIS_EN                 (1 << 2)
+#define CMP_AUTOEN             (1 << 1)
+#define BLEN                   (1 << 0)
+
+/* ADP8870_ALS1_EN Main ALS comparator level enable */
+#define L5_EN                  (1 << 3)
+#define L4_EN                  (1 << 2)
+#define L3_EN                  (1 << 1)
+#define L2_EN                  (1 << 0)
+
+#define CFGR_BLV_SHIFT         3
+#define CFGR_BLV_MASK          0x7
+#define ADP8870_FLAG_LED_MASK  0xFF
+
+#define FADE_VAL(in, out)      ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CFGR_VAL(law, blv)  ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
+#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
+
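
Worked example of the packing macros above: FADE_VAL(0x3, 0x5) yields 0x53 (fade-in code in the low nibble, fade-out code in the high nibble), and BL_CFGR_VAL(1, 2) yields (2 << 3) | (1 << 1) = 0x12, i.e. the brightness level in bits 5:3 and the fade law in bits 2:1.
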
+struct adp8870_bl {
+       struct i2c_client *client;
+       struct backlight_device *bl;
+       struct adp8870_led *led;
+       struct adp8870_backlight_platform_data *pdata;
+       struct mutex lock;
+       unsigned long cached_daylight_max;
+       int id;
+       int revid;
+       int current_brightness;
+};
+
+struct adp8870_led {
+       struct led_classdev     cdev;
+       struct work_struct      work;
+       struct i2c_client       *client;
+       enum led_brightness     new_brightness;
+       int                     id;
+       int                     flags;
+};
+
+static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
+{
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, reg);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+               return ret;
+       }
+
+       *val = ret;
+       return 0;
+}
+
+
+static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
+{
+       int ret = i2c_smbus_write_byte_data(client, reg, val);
+       if (ret)
+               dev_err(&client->dev, "failed to write\n");
+
+       return ret;
+}
+
+static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+       struct adp8870_bl *data = i2c_get_clientdata(client);
+       uint8_t reg_val;
+       int ret;
+
+       mutex_lock(&data->lock);
+
+       ret = adp8870_read(client, reg, &reg_val);
+
+       if (!ret && ((reg_val & bit_mask) == 0)) {
+               reg_val |= bit_mask;
+               ret = adp8870_write(client, reg, reg_val);
+       }
+
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
+static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+       struct adp8870_bl *data = i2c_get_clientdata(client);
+       uint8_t reg_val;
+       int ret;
+
+       mutex_lock(&data->lock);
+
+       ret = adp8870_read(client, reg, &reg_val);
+
+       if (!ret && (reg_val & bit_mask)) {
+               reg_val &= ~bit_mask;
+               ret = adp8870_write(client, reg, reg_val);
+       }
+
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
+/*
+ * Independent sink / LED
+ */
+#if defined(ADP8870_USE_LEDS)
+static void adp8870_led_work(struct work_struct *work)
+{
+       struct adp8870_led *led = container_of(work, struct adp8870_led, work);
+       adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
+                        led->new_brightness >> 1);
+}
+
+static void adp8870_led_set(struct led_classdev *led_cdev,
+                          enum led_brightness value)
+{
+       struct adp8870_led *led;
+
+       led = container_of(led_cdev, struct adp8870_led, cdev);
+       led->new_brightness = value;
+       /*
+        * Use workqueue for IO since I2C operations can sleep.
+        */
+       schedule_work(&led->work);
+}
+
+static int adp8870_led_setup(struct adp8870_led *led)
+{
+       struct i2c_client *client = led->client;
+       int ret = 0;
+
+       ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
+       if (ret)
+               return ret;
+
+       ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
+       if (ret)
+               return ret;
+
+       if (led->id > 4)
+               ret = adp8870_set_bits(client, ADP8870_ISCT1,
+                               (led->flags & 0x3) << ((led->id - 5) * 2));
+       else
+               ret = adp8870_set_bits(client, ADP8870_ISCT2,
+                               (led->flags & 0x3) << ((led->id - 1) * 2));
+
+       return ret;
+}
+
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+       struct adp8870_backlight_platform_data *pdata =
+               client->dev.platform_data;
+       struct adp8870_bl *data = i2c_get_clientdata(client);
+       struct adp8870_led *led, *led_dat;
+       struct led_info *cur_led;
+       int ret, i;
+
+
+       led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+       if (led == NULL) {
+               dev_err(&client->dev, "failed to alloc memory\n");
+               return -ENOMEM;
+       }
+
+       ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
+       if (ret)
+               goto err_free;
+
+       ret = adp8870_write(client, ADP8870_ISCT1,
+                       (pdata->led_on_time & 0x3) << 6);
+       if (ret)
+               goto err_free;
+
+       ret = adp8870_write(client, ADP8870_ISCF,
+                       FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
+       if (ret)
+               goto err_free;
+
+       for (i = 0; i < pdata->num_leds; ++i) {
+               cur_led = &pdata->leds[i];
+               led_dat = &led[i];
+
+               led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
+
+               if (led_dat->id > 7 || led_dat->id < 1) {
+                       dev_err(&client->dev, "Invalid LED ID %d\n",
+                               led_dat->id);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
+                       dev_err(&client->dev, "LED %d used by Backlight\n",
+                               led_dat->id);
+                       ret = -EBUSY;
+                       goto err;
+               }
+
+               led_dat->cdev.name = cur_led->name;
+               led_dat->cdev.default_trigger = cur_led->default_trigger;
+               led_dat->cdev.brightness_set = adp8870_led_set;
+               led_dat->cdev.brightness = LED_OFF;
+               led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
+               led_dat->client = client;
+               led_dat->new_brightness = LED_OFF;
+               INIT_WORK(&led_dat->work, adp8870_led_work);
+
+               ret = led_classdev_register(&client->dev, &led_dat->cdev);
+               if (ret) {
+                       dev_err(&client->dev, "failed to register LED %d\n",
+                               led_dat->id);
+                       goto err;
+               }
+
+               ret = adp8870_led_setup(led_dat);
+               if (ret) {
+                       dev_err(&client->dev, "failed to write\n");
+                       i++;
+                       goto err;
+               }
+       }
+
+       data->led = led;
+
+       return 0;
+
+ err:
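+       /*
+        * Unregister the LEDs set up so far (entries 0..i-1) and make
+        * sure their work items have finished before freeing the array.
+        */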
+       for (i = i - 1; i >= 0; --i) {
+               led_classdev_unregister(&led[i].cdev);
+               cancel_work_sync(&led[i].work);
+       }
+
+ err_free:
+       kfree(led);
+
+       return ret;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+       struct adp8870_backlight_platform_data *pdata =
+               client->dev.platform_data;
+       struct adp8870_bl *data = i2c_get_clientdata(client);
+       int i;
+
+       for (i = 0; i < pdata->num_leds; i++) {
+               led_classdev_unregister(&data->led[i].cdev);
+               cancel_work_sync(&data->led[i].work);
+       }
+
+       kfree(data->led);
+       return 0;
+}
+#else
+static int __devinit adp8870_led_probe(struct i2c_client *client)
+{
+       return 0;
+}
+
+static int __devexit adp8870_led_remove(struct i2c_client *client)
+{
+       return 0;
+}
+#endif
+
+static int adp8870_bl_set(struct backlight_device *bl, int brightness)
+{
+       struct adp8870_bl *data = bl_get_data(bl);
+       struct i2c_client *client = data->client;
+       int ret = 0;
+
+       if (data->pdata->en_ambl_sens) {
+               if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
+                       /* Disable Ambient Light auto adjust */
+                       ret = adp8870_clr_bits(client, ADP8870_MDCR,
+                                       CMP_AUTOEN);
+                       if (ret)
+                               return ret;
+                       ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+                       if (ret)
+                               return ret;
+               } else {
+                       /*
+                        * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
+                        * restore daylight l1 sysfs brightness
+                        */
+                       ret = adp8870_write(client, ADP8870_BLMX1,
+                                        data->cached_daylight_max);
+                       if (ret)
+                               return ret;
+
+                       ret = adp8870_set_bits(client, ADP8870_MDCR,
+                                        CMP_AUTOEN);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               ret = adp8870_write(client, ADP8870_BLMX1, brightness);
+               if (ret)
+                       return ret;
+       }
+
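+       /*
+        * Toggle dim mode on off/on transitions: DIM_EN is set when the
+        * backlight goes dark and cleared again on the first non-zero
+        * brightness.
+        */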
+       if (data->current_brightness && brightness == 0)
+               ret = adp8870_set_bits(client,
+                               ADP8870_MDCR, DIM_EN);
+       else if (data->current_brightness == 0 && brightness)
+               ret = adp8870_clr_bits(client,
+                               ADP8870_MDCR, DIM_EN);
+
+       if (!ret)
+               data->current_brightness = brightness;
+
+       return ret;
+}
+
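+/*
+ * A blanked or powered-down framebuffer is treated as brightness 0.
+ */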
+static int adp8870_bl_update_status(struct backlight_device *bl)
+{
+       int brightness = bl->props.brightness;
+
+       if (bl->props.power != FB_BLANK_UNBLANK)
+               brightness = 0;
+
+       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+               brightness = 0;
+
+       return adp8870_bl_set(bl, brightness);
+}
+
+static int adp8870_bl_get_brightness(struct backlight_device *bl)
+{
+       struct adp8870_bl *data = bl_get_data(bl);
+
+       return data->current_brightness;
+}
+
+static const struct backlight_ops adp8870_bl_ops = {
+       .update_status  = adp8870_bl_update_status,
+       .get_brightness = adp8870_bl_get_brightness,
+};
+
+static int adp8870_bl_setup(struct backlight_device *bl)
+{
+       struct adp8870_bl *data = bl_get_data(bl);
+       struct i2c_client *client = data->client;
+       struct adp8870_backlight_platform_data *pdata = data->pdata;
+       int ret = 0;
+
+       ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
+       if (ret)
+               return ret;
+
+       ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
+       if (ret)
+               return ret;
+
+       ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
+       if (ret)
+               return ret;
+
+       ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
+       if (ret)
+               return ret;
+
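+       /*
+        * L1 (daylight) above is always programmed; the L2..L5 levels and
+        * their trip/hysteresis thresholds only apply when the ambient
+        * light sensor is enabled.
+        */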
+       if (pdata->en_ambl_sens) {
+               data->cached_daylight_max = pdata->l1_daylight_max;
+               ret = adp8870_write(client, ADP8870_BLMX2,
+                                               pdata->l2_bright_max);
+               if (ret)
+                       return ret;
+               ret = adp8870_write(client, ADP8870_BLDM2,
+                                               pdata->l2_bright_dim);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_BLMX3,
+                                               pdata->l3_office_max);
+               if (ret)
+                       return ret;
+               ret = adp8870_write(client, ADP8870_BLDM3,
+                                               pdata->l3_office_dim);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_BLMX4,
+                                               pdata->l4_indoor_max);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_BLDM4,
+                                               pdata->l4_indor_dim);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_BLMX5,
+                                               pdata->l5_dark_max);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_BLDM5,
+                                               pdata->l5_dark_dim);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
+                                               L3_EN | L2_EN);
+               if (ret)
+                       return ret;
+
+               ret = adp8870_write(client, ADP8870_CMP_CTL,
+                       ALS_CMPR_CFG_VAL(pdata->abml_filt));
+               if (ret)
+                       return ret;
+       }
+
+       ret = adp8870_write(client, ADP8870_CFGR,
+                       BL_CFGR_VAL(pdata->bl_fade_law, 0));
+       if (ret)
+               return ret;
+
+       ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
+                       pdata->bl_fade_out));
+       if (ret)
+               return ret;
+       /*
+        * ADP8870 Rev0 requires GDWN_DIS bit set
+        */
+
+       ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
+                       (data->revid == 0 ? GDWN_DIS : 0));
+
+       return ret;
+}
+
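+/*
+ * sysfs helpers: read or write a single register under data->lock; the
+ * per-level attributes below are thin wrappers around these two.
+ */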
+static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       int error;
+       uint8_t reg_val;
+
+       mutex_lock(&data->lock);
+       error = adp8870_read(data->client, reg, &reg_val);
+       mutex_unlock(&data->lock);
+
+       if (error < 0)
+               return error;
+
+       return sprintf(buf, "%u\n", reg_val);
+}
+
+static ssize_t adp8870_store(struct device *dev, const char *buf,
+                        size_t count, int reg)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       mutex_lock(&data->lock);
+       adp8870_write(data->client, reg, val);
+       mutex_unlock(&data->lock);
+
+       return count;
+}
+
+static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLMX5);
+}
+
+static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLMX5);
+}
+static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
+                       adp8870_bl_l5_dark_max_store);
+
+
+static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLMX4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLMX4);
+}
+static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
+                       adp8870_bl_l4_indoor_max_store);
+
+
+static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLMX3);
+}
+
+static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLMX3);
+}
+
+static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
+                       adp8870_bl_l3_office_max_store);
+
+static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLMX2);
+}
+
+static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLMX2);
+}
+static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
+                       adp8870_bl_l2_bright_max_store);
+
+static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLMX1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+       if (ret)
+               return ret;
+
+       return adp8870_store(dev, buf, count, ADP8870_BLMX1);
+}
+static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
+                       adp8870_bl_l1_daylight_max_store);
+
+static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLDM5);
+}
+
+static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLDM5);
+}
+static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
+                       adp8870_bl_l5_dark_dim_store);
+
+static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLDM4);
+}
+
+static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLDM4);
+}
+static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
+                       adp8870_bl_l4_indoor_dim_store);
+
+
+static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLDM3);
+}
+
+static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLDM3);
+}
+static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
+                       adp8870_bl_l3_office_dim_store);
+
+static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLDM2);
+}
+
+static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLDM2);
+}
+static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
+                       adp8870_bl_l2_bright_dim_store);
+
+static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       return adp8870_show(dev, buf, ADP8870_BLDM1);
+}
+
+static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       return adp8870_store(dev, buf, count, ADP8870_BLDM1);
+}
+static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
+                       adp8870_bl_l1_daylight_dim_store);
+
+#ifdef ADP8870_EXT_FEATURES
+static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       int error;
+       uint8_t reg_val;
+       uint16_t ret_val;
+
+       mutex_lock(&data->lock);
+       error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val);
+       if (error < 0) {
+               mutex_unlock(&data->lock);
+               return error;
+       }
+       ret_val = reg_val;
+       error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val);
+       mutex_unlock(&data->lock);
+
+       if (error < 0)
+               return error;
+
+       /* Return 13-bit conversion value for the first light sensor */
+       ret_val += (reg_val & 0x1F) << 8;
+
+       return sprintf(buf, "%u\n", ret_val);
+}
+static DEVICE_ATTR(ambient_light_level, 0444,
+               adp8870_bl_ambient_light_level_show, NULL);
+
+static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       int error;
+       uint8_t reg_val;
+
+       mutex_lock(&data->lock);
+       error = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+       mutex_unlock(&data->lock);
+
+       if (error < 0)
+               return error;
+
+       return sprintf(buf, "%u\n",
+               ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
+}
+
+static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct adp8870_bl *data = dev_get_drvdata(dev);
+       unsigned long val;
+       uint8_t reg_val;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       if (val == 0) {
+               /* Enable automatic ambient light sensing */
+               adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+       } else if ((val > 0) && (val < 6)) {
+               /* Disable automatic ambient light sensing */
+               adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
+
+               /* Set user supplied ambient light zone */
+               mutex_lock(&data->lock);
+               adp8870_read(data->client, ADP8870_CFGR, &reg_val);
+               reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
+               reg_val |= (val - 1) << CFGR_BLV_SHIFT;
+               adp8870_write(data->client, ADP8870_CFGR, reg_val);
+               mutex_unlock(&data->lock);
+       }
+
+       return count;
+}
+static DEVICE_ATTR(ambient_light_zone, 0664,
+               adp8870_bl_ambient_light_zone_show,
+               adp8870_bl_ambient_light_zone_store);
+#endif
+
+static struct attribute *adp8870_bl_attributes[] = {
+       &dev_attr_l5_dark_max.attr,
+       &dev_attr_l5_dark_dim.attr,
+       &dev_attr_l4_indoor_max.attr,
+       &dev_attr_l4_indoor_dim.attr,
+       &dev_attr_l3_office_max.attr,
+       &dev_attr_l3_office_dim.attr,
+       &dev_attr_l2_bright_max.attr,
+       &dev_attr_l2_bright_dim.attr,
+       &dev_attr_l1_daylight_max.attr,
+       &dev_attr_l1_daylight_dim.attr,
+#ifdef ADP8870_EXT_FEATURES
+       &dev_attr_ambient_light_level.attr,
+       &dev_attr_ambient_light_zone.attr,
+#endif
+       NULL
+};
+
+static const struct attribute_group adp8870_bl_attr_group = {
+       .attrs = adp8870_bl_attributes,
+};
+
+static int __devinit adp8870_probe(struct i2c_client *client,
+                                       const struct i2c_device_id *id)
+{
+       struct backlight_properties props;
+       struct backlight_device *bl;
+       struct adp8870_bl *data;
+       struct adp8870_backlight_platform_data *pdata =
+               client->dev.platform_data;
+       uint8_t reg_val;
+       int ret;
+
+       if (!i2c_check_functionality(client->adapter,
+                                       I2C_FUNC_SMBUS_BYTE_DATA)) {
+               dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+               return -EIO;
+       }
+
+       if (!pdata) {
+               dev_err(&client->dev, "no platform data?\n");
+               return -EINVAL;
+       }
+
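+       /*
+        * MFDVID encodes the manufacturer ID and the silicon revision:
+        * reject foreign parts and remember the revision, since Rev0
+        * needs GDWN_DIS set in adp8870_bl_setup().
+        */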
+       ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
+       if (ret < 0)
+               return -EIO;
+
+       if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
+               dev_err(&client->dev, "failed to probe\n");
+               return -ENODEV;
+       }
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       data->revid = ADP8870_DEVID(reg_val);
+       data->client = client;
+       data->pdata = pdata;
+       data->id = id->driver_data;
+       data->current_brightness = 0;
+       i2c_set_clientdata(client, data);
+
+       mutex_init(&data->lock);
+
+       memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_RAW;
+       props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
+       bl = backlight_device_register(dev_driver_string(&client->dev),
+                       &client->dev, data, &adp8870_bl_ops, &props);
+       if (IS_ERR(bl)) {
+               dev_err(&client->dev, "failed to register backlight\n");
+               ret = PTR_ERR(bl);
+               goto out2;
+       }
+
+       data->bl = bl;
+
+       if (pdata->en_ambl_sens) {
+               ret = sysfs_create_group(&bl->dev.kobj,
+                       &adp8870_bl_attr_group);
+               if (ret) {
+                       dev_err(&client->dev, "failed to register sysfs\n");
+                       goto out1;
+               }
+       }
+
+       ret = adp8870_bl_setup(bl);
+       if (ret) {
+               ret = -EIO;
+               goto out;
+       }
+
+       backlight_update_status(bl);
+
+       dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);
+
+       if (pdata->num_leds)
+               adp8870_led_probe(client);
+
+       return 0;
+
+out:
+       if (data->pdata->en_ambl_sens)
+               sysfs_remove_group(&data->bl->dev.kobj,
+                       &adp8870_bl_attr_group);
+out1:
+       backlight_device_unregister(bl);
+out2:
+       i2c_set_clientdata(client, NULL);
+       kfree(data);
+
+       return ret;
+}
+
+static int __devexit adp8870_remove(struct i2c_client *client)
+{
+       struct adp8870_bl *data = i2c_get_clientdata(client);
+
+       adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+       if (data->led)
+               adp8870_led_remove(client);
+
+       if (data->pdata->en_ambl_sens)
+               sysfs_remove_group(&data->bl->dev.kobj,
+                       &adp8870_bl_attr_group);
+
+       backlight_device_unregister(data->bl);
+       i2c_set_clientdata(client, NULL);
+       kfree(data);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
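+/*
+ * Legacy I2C PM hooks: clear NSTBY to put the part into standby over
+ * suspend and set it again on resume.
+ */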
+static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+       adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
+
+       return 0;
+}
+
+static int adp8870_i2c_resume(struct i2c_client *client)
+{
+       adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
+
+       return 0;
+}
+#else
+#define adp8870_i2c_suspend NULL
+#define adp8870_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id adp8870_id[] = {
+       { "adp8870", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, adp8870_id);
+
+static struct i2c_driver adp8870_driver = {
+       .driver = {
+               .name = KBUILD_MODNAME,
+       },
+       .probe    = adp8870_probe,
+       .remove   = __devexit_p(adp8870_remove),
+       .suspend  = adp8870_i2c_suspend,
+       .resume   = adp8870_i2c_resume,
+       .id_table = adp8870_id,
+};
+
+static int __init adp8870_init(void)
+{
+       return i2c_add_driver(&adp8870_driver);
+}
+module_init(adp8870_init);
+
+static void __exit adp8870_exit(void)
+{
+       i2c_del_driver(&adp8870_driver);
+}
+module_exit(adp8870_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP8870 Backlight driver");
+MODULE_ALIAS("platform:adp8870-backlight");
index 47c21fb2c82fc50b2ac1329c30c1055b96874391..bea53c1a4950143d149a6694334dc8606f37fdd0 100644 (file)
@@ -789,6 +789,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
        i2c_add_driver(&ad5280_driver);
 
        memset(&props, 0, sizeof(props));
+       props.type = BACKLIGHT_RAW;
        props.max_brightness = MAX_BRIGHENESS;
        bl_dev = backlight_device_register("bf537-bl", NULL, NULL,
                                           &bfin_lq035fb_bl_ops, &props);
index ebda6876d3a9efe26c2883ac7e73f8fad020c4fc..377dde3d5bfc8954aaccfc643b9664408e0c9da1 100644 (file)
@@ -1101,12 +1101,10 @@ static int __devinit broadsheetfb_probe(struct platform_device *dev)
 
        videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
 
-       videomemory = vmalloc(videomemorysize);
+       videomemory = vzalloc(videomemorysize);
        if (!videomemory)
                goto err_fb_rel;
 
-       memset(videomemory, 0, videomemorysize);
-
        info->screen_base = (char *)videomemory;
        info->fbops = &broadsheetfb_ops;
 
index fb205843c2c726da59b7f5f20c0c338ddba005d6..784139aed0793cab282cf52f3f3c3503a85a9a5a 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/pci.h>
 #include <video/vga.h>
 
+static bool request_mem_succeeded = false;
+
 static struct fb_var_screeninfo efifb_defined __devinitdata = {
        .activate               = FB_ACTIVATE_NOW,
        .height                 = -1,
@@ -281,7 +283,9 @@ static void efifb_destroy(struct fb_info *info)
 {
        if (info->screen_base)
                iounmap(info->screen_base);
-       release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+       if (request_mem_succeeded)
+               release_mem_region(info->apertures->ranges[0].base,
+                                  info->apertures->ranges[0].size);
        framebuffer_release(info);
 }
 
@@ -326,14 +330,13 @@ static int __init efifb_setup(char *options)
        return 0;
 }
 
-static int __devinit efifb_probe(struct platform_device *dev)
+static int __init efifb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
        int err;
        unsigned int size_vmode;
        unsigned int size_remap;
        unsigned int size_total;
-       int request_succeeded = 0;
 
        if (!screen_info.lfb_depth)
                screen_info.lfb_depth = 32;
@@ -387,7 +390,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
        efifb_fix.smem_len = size_remap;
 
        if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) {
-               request_succeeded = 1;
+               request_mem_succeeded = true;
        } else {
                /* We cannot make this fatal. Sometimes this comes from magic
                   spaces our resource handlers simply don't know about */
@@ -413,7 +416,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+       info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
                                "0x%x @ 0x%lx\n",
@@ -491,13 +494,12 @@ err_unmap:
 err_release_fb:
        framebuffer_release(info);
 err_release_mem:
-       if (request_succeeded)
+       if (request_mem_succeeded)
                release_mem_region(efifb_fix.smem_start, size_total);
        return err;
 }
 
 static struct platform_driver efifb_driver = {
-       .probe  = efifb_probe,
        .driver = {
                .name   = "efifb",
        },
@@ -528,13 +530,21 @@ static int __init efifb_init(void)
        if (!screen_info.lfb_linelength)
                return -ENODEV;
 
-       ret = platform_driver_register(&efifb_driver);
+       ret = platform_device_register(&efifb_device);
+       if (ret)
+               return ret;
 
-       if (!ret) {
-               ret = platform_device_register(&efifb_device);
-               if (ret)
-                       platform_driver_unregister(&efifb_driver);
+       /*
+        * This is not just an optimization.  We will interfere
+        * with a real driver if we get reprobed, so don't allow
+        * it.
+        */
+       ret = platform_driver_probe(&efifb_driver, efifb_probe);
+       if (ret) {
+               platform_device_unregister(&efifb_device);
+               return ret;
        }
+
        return ret;
 }
 module_init(efifb_init);
index 1b94643ecbcfff4dd63eca7b52953ba2f702f21e..fbef15f7a21803051e778b89436cd08c2f0f0561 100644 (file)
@@ -231,11 +231,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
 
        videomemorysize = (DPY_W*DPY_H)/8;
 
-       if (!(videomemory = vmalloc(videomemorysize)))
+       videomemory = vzalloc(videomemorysize);
+       if (!videomemory)
                return retval;
 
-       memset(videomemory, 0, videomemorysize);
-
        info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
        if (!info)
                goto err_fballoc;
index d2ccfd6e662cbe1fda2da5d766750e21793ebc4c..f135dbead07d8921068de3513dafb2c32ea10973 100644 (file)
@@ -856,10 +856,10 @@ failed_platform_init:
                dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
                        fbi->map_dma);
 failed_map:
-       clk_put(fbi->clk);
-failed_getclock:
        iounmap(fbi->regs);
 failed_ioremap:
+       clk_put(fbi->clk);
+failed_getclock:
        release_mem_region(res->start, resource_size(res));
 failed_req:
        kfree(info->pseudo_palette);
index ed64edfd2c43b7a97e693ff30b6e26972caaf7f7..97d45e5115e254647454d7c5b7598053944c7273 100644 (file)
@@ -628,12 +628,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
        /* we need to add a spare page because our csum caching scheme walks
         * to the end of the page */
        videomemorysize = PAGE_SIZE + (fw * fh);
-       videomemory = vmalloc(videomemorysize);
+       videomemory = vzalloc(videomemorysize);
        if (!videomemory)
                goto err_fb_rel;
 
-       memset(videomemory, 0, videomemorysize);
-
        info->screen_base = (char __force __iomem *)videomemory;
        info->fbops = &metronomefb_ops;
 
index 48c3ea8652b66e0b7862a392c5d4a04b4df95f32..cb175fe7abc0829f65d7aeb3c54082bb80a8999a 100644 (file)
@@ -1128,3 +1128,4 @@ EXPORT_SYMBOL(fb_find_best_mode);
 EXPORT_SYMBOL(fb_find_nearest_mode);
 EXPORT_SYMBOL(fb_videomode_to_modelist);
 EXPORT_SYMBOL(fb_find_mode);
+EXPORT_SYMBOL(fb_find_mode_cvt);
index 35f61dd0cb3a43f5a9e3f70ac0dc5a07136a91b1..bb95ec56d25d9943aef4150595c0f190c9bb0cbc 100644 (file)
@@ -623,19 +623,21 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "no IO memory defined\n");
-               return -ENOENT;
+               ret = -ENOENT;
+               goto failed_put_clk;
        }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ defined\n");
-               return -ENOENT;
+               ret = -ENOENT;
+               goto failed_put_clk;
        }
 
        info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
        if (info == NULL) {
-               clk_put(clk);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto failed_put_clk;
        }
 
        /* Initialize private data */
@@ -671,7 +673,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
        fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
        if (fbi->reg_base == NULL) {
                ret = -ENOMEM;
-               goto failed;
+               goto failed_free_info;
        }
 
        /*
@@ -683,7 +685,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
                                                &fbi->fb_start_dma, GFP_KERNEL);
        if (info->screen_base == NULL) {
                ret = -ENOMEM;
-               goto failed;
+               goto failed_free_info;
        }
 
        info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
@@ -772,8 +774,9 @@ failed_free_clk:
 failed_free_fbmem:
        dma_free_coherent(fbi->dev, info->fix.smem_len,
                        info->screen_base, fbi->fb_start_dma);
-failed:
+failed_free_info:
        kfree(info);
+failed_put_clk:
        clk_put(clk);
 
        dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
index 0352afa49a392716762f7304cbdd00ae1770b396..4aecf213c9be46cb1f37ee38d231aabcf9d089d3 100644 (file)
@@ -235,13 +235,12 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
 {
        struct s3c_fb_win *win = info->par;
-       struct s3c_fb_pd_win *windata = win->windata;
        struct s3c_fb *sfb = win->parent;
 
        dev_dbg(sfb->dev, "checking parameters\n");
 
-       var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres);
-       var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres);
+       var->xres_virtual = max(var->xres_virtual, var->xres);
+       var->yres_virtual = max(var->yres_virtual, var->yres);
 
        if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
                dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
@@ -558,6 +557,13 @@ static int s3c_fb_set_par(struct fb_info *info)
        vidosd_set_alpha(win, alpha);
        vidosd_set_size(win, data);
 
+       /* Enable DMA channel for this window */
+       if (sfb->variant.has_shadowcon) {
+               data = readl(sfb->regs + SHADOWCON);
+               data |= SHADOWCON_CHx_ENABLE(win_no);
+               writel(data, sfb->regs + SHADOWCON);
+       }
+
        data = WINCONx_ENWIN;
 
        /* note, since we have to round up the bits-per-pixel, we end up
@@ -637,13 +643,6 @@ static int s3c_fb_set_par(struct fb_info *info)
        writel(data, regs + sfb->variant.wincon + (win_no * 4));
        writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
 
-       /* Enable DMA channel for this window */
-       if (sfb->variant.has_shadowcon) {
-               data = readl(sfb->regs + SHADOWCON);
-               data |= SHADOWCON_CHx_ENABLE(win_no);
-               writel(data, sfb->regs + SHADOWCON);
-       }
-
        shadow_protect_win(win, 0);
 
        return 0;
@@ -1487,11 +1486,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
 
        release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
 
-       kfree(sfb);
-
        pm_runtime_put_sync(sfb->dev);
        pm_runtime_disable(sfb->dev);
 
+       kfree(sfb);
        return 0;
 }
 
index 3b7f2f5bae71dc78b98bf8e4d7befd1b75c05bba..4de541ca9c52c14d38cb95762ba86034e71d59fd 100644 (file)
@@ -2237,6 +2237,22 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
                                 &info->modelist);
 #endif
        info->var = savagefb_var800x600x8;
+       /* if a panel was detected, default to a CVT mode instead */
+       if (par->SavagePanelWidth) {
+               struct fb_videomode cvt_mode;
+
+               memset(&cvt_mode, 0, sizeof(cvt_mode));
+               cvt_mode.xres = par->SavagePanelWidth;
+               cvt_mode.yres = par->SavagePanelHeight;
+               cvt_mode.refresh = 60;
+               /* FIXME: if we know there is only the panel
+                * we can enable reduced blanking as well */
+               if (fb_find_mode_cvt(&cvt_mode, 0, 0))
+                       printk(KERN_WARNING "No CVT mode found for panel\n");
+               else if (fb_find_mode(&info->var, info, NULL, NULL, 0,
+                                     &cvt_mode, 0) != 3)
+                       info->var = savagefb_var800x600x8;
+       }
 
        if (mode_option) {
                fb_find_mode(&info->var, info, mode_option,
index 6ae40b630dc9eb7c9c39641d4a0d2c27f7f1b1b3..7d54e2c612f774c292088507d24bba47e8d4f605 100644 (file)
@@ -1127,23 +1127,16 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
                struct fb_info *info = hdmi->info;
                unsigned long parent_rate = 0, hdmi_rate;
 
-               /* A device has been plugged in */
-               pm_runtime_get_sync(hdmi->dev);
-
                ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
-               if (ret < 0) {
-                       pm_runtime_put(hdmi->dev);
+               if (ret < 0)
                        goto out;
-               }
 
                hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
 
                /* Reconfigure the clock */
                ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
-               if (ret < 0) {
-                       pm_runtime_put(hdmi->dev);
+               if (ret < 0)
                        goto out;
-               }
 
                msleep(10);
                sh_hdmi_configure(hdmi);
@@ -1191,7 +1184,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
                fb_set_suspend(hdmi->info, 1);
 
                console_unlock();
-               pm_runtime_put(hdmi->dev);
        }
 
 out:
@@ -1312,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
        INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn);
 
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_resume(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
 
        /* Product and revision IDs are 0 in sh-mobile version */
        dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
@@ -1340,7 +1332,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
 ecodec:
        free_irq(irq, hdmi);
 ereqirq:
-       pm_runtime_suspend(&pdev->dev);
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        iounmap(hdmi->base);
 emap:
@@ -1377,7 +1369,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
        free_irq(irq, hdmi);
        /* Wait for already scheduled work */
        cancel_delayed_work_sync(&hdmi->edid_work);
-       pm_runtime_suspend(&pdev->dev);
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        clk_disable(hdmi->hdmi_clk);
        clk_put(hdmi->hdmi_clk);
index 404c03b4b7c7c4e570646a3fb4461e8a70a8855f..019dbd3f12b247d3fec1fb4836dd8f36cf5a3819 100644 (file)
@@ -470,7 +470,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
        unsigned long tmp;
        int bpp = 0;
        unsigned long ldddsr;
-       int k, m;
+       int k, m, ret;
 
        /* enable clocks before accessing the hardware */
        for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -540,7 +540,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
 
                board_cfg = &ch->cfg.board_cfg;
                if (board_cfg->setup_sys) {
-                       int ret = board_cfg->setup_sys(board_cfg->board_data,
+                       ret = board_cfg->setup_sys(board_cfg->board_data,
                                                ch, &sh_mobile_lcdc_sys_bus_ops);
                        if (ret)
                                return ret;
index 53b2c5aae06791becfea4721a3d0ad702e24f109..305c975b1787ea0628d600d2ac89c89cd0afdee9 100644 (file)
@@ -1265,9 +1265,11 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
 
 static void vga16fb_destroy(struct fb_info *info)
 {
+       struct platform_device *dev = container_of(info->device, struct platform_device, dev);
        iounmap(info->screen_base);
        fb_dealloc_cmap(&info->cmap);
        /* XXX unshare VGA regions */
+       platform_set_drvdata(dev, NULL);
        framebuffer_release(info);
 }
 
index a20218c2fda8be0a98b4a219bff743db2a0b5018..beac52fc1c0eea6c1cadf337983c3b2b5c78614d 100644 (file)
@@ -395,10 +395,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
        spin_lock_init(&info->dirty_lock);
        spin_lock_init(&info->resize_lock);
 
-       info->fb = vmalloc(fb_size);
+       info->fb = vzalloc(fb_size);
        if (info->fb == NULL)
                goto error_nomem;
-       memset(info->fb, 0, fb_size);
 
        info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
index 00d615d7aa216d52387e8d67952851a42802a326..979d6eed9a0ff5178657ad5a36a1f203b87814b2 100644 (file)
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
 
 config W1_MASTER_DS1WM
        tristate "Maxim DS1WM 1-wire busmaster"
-       depends on W1
+       depends on W1 && GENERIC_HARDIRQS
        help
          Say Y here to enable the DS1WM 1-wire driver, such as that
          in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
index 3ff822b481454847487b2c3eeaaa18d354f33721..553da68bd510c8ef0c7fecc2f5676778d6525954 100644 (file)
@@ -626,6 +626,9 @@ int xen_allocate_pirq_gsi(unsigned gsi)
  *
  * Note: We don't assign an event channel until the irq actually started
  * up.  Return an existing irq if we've already got one for the gsi.
+ *
+ * Shareable implies level triggered, not shareable implies edge
+ * triggered here.
  */
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
@@ -664,16 +667,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 
        pirq_query_unmask(irq);
        /* We try to use the handler with the appropriate semantic for the
-        * type of interrupt: if the interrupt doesn't need an eoi
-        * (pirq_needs_eoi returns false), we treat it like an edge
-        * triggered interrupt so we use handle_edge_irq.
-        * As a matter of fact this only happens when the corresponding
-        * physical interrupt is edge triggered or an msi.
+        * type of interrupt: if the interrupt is an edge triggered
+        * interrupt we use handle_edge_irq.
         *
-        * On the other hand if the interrupt needs an eoi (pirq_needs_eoi
-        * returns true) we treat it like a level triggered interrupt so we
-        * use handle_fasteoi_irq like the native code does for this kind of
+        * On the other hand if the interrupt is level triggered we use
+        * handle_fasteoi_irq like the native code does for this kind of
         * interrupts.
+        *
         * Depending on the Xen version, pirq_needs_eoi might return true
         * not only for level triggered interrupts but for edge triggered
         * interrupts too. In any case Xen always honors the eoi mechanism,
@@ -681,7 +681,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
         * hasn't received an eoi yet. Therefore using the fasteoi handler
         * is the right choice either way.
         */
-       if (pirq_needs_eoi(irq))
+       if (shareable)
                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
                                handle_fasteoi_irq, name);
        else
index 65ea21a974920f299afbaaa1c6e379930098f09b..6e8c15a23201a3a280948cf25142001347a0d6cb 100644 (file)
@@ -147,9 +147,15 @@ void __init xen_swiotlb_init(int verbose)
 {
        unsigned long bytes;
        int rc;
-
-       xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-       xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+       unsigned long nr_tbl;
+
+       nr_tbl = swioltb_nr_tbl();
+       if (nr_tbl)
+               xen_io_tlb_nslabs = nr_tbl;
+       else {
+               xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+               xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
 
        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
 
index 20c106f2492740f7de1615ee7712207b74a33828..1b0b19550015d2f19949b5b85d84556ecf7adb36 100644 (file)
@@ -584,11 +584,11 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 
 success:
        d_add(dentry, inode);
-       _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
+       _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
               fid.vnode,
               fid.unique,
               dentry->d_inode->i_ino,
-              (unsigned long long)dentry->d_inode->i_version);
+              dentry->d_inode->i_generation);
 
        return NULL;
 }
@@ -671,10 +671,10 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
                 * been deleted and replaced, and the original vnode ID has
                 * been reused */
                if (fid.unique != vnode->fid.unique) {
-                       _debug("%s: file deleted (uq %u -> %u I:%llu)",
+                       _debug("%s: file deleted (uq %u -> %u I:%u)",
                               dentry->d_name.name, fid.unique,
                               vnode->fid.unique,
-                              (unsigned long long)dentry->d_inode->i_version);
+                              dentry->d_inode->i_generation);
                        spin_lock(&vnode->lock);
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        spin_unlock(&vnode->lock);
index 4bd0218473a9bb407a9c86be49f0400461621f24..346e3289abd70549987ce9f490e1d8b210b66bd1 100644 (file)
@@ -89,7 +89,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
                        i_size_write(&vnode->vfs_inode, size);
                        vnode->vfs_inode.i_uid = status->owner;
                        vnode->vfs_inode.i_gid = status->group;
-                       vnode->vfs_inode.i_version = vnode->fid.unique;
+                       vnode->vfs_inode.i_generation = vnode->fid.unique;
                        vnode->vfs_inode.i_nlink = status->nlink;
 
                        mode = vnode->vfs_inode.i_mode;
@@ -102,6 +102,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
                vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
                vnode->vfs_inode.i_mtime        = vnode->vfs_inode.i_ctime;
                vnode->vfs_inode.i_atime        = vnode->vfs_inode.i_ctime;
+               vnode->vfs_inode.i_version      = data_version;
        }
 
        expected_version = status->data_version;
index db66c5201474dc9b380ab21419fa61f7ff26fa78..0fdab6e03d8781d60ea7968c5c9886b555178dcd 100644 (file)
@@ -75,7 +75,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
        inode->i_ctime.tv_nsec  = 0;
        inode->i_atime          = inode->i_mtime = inode->i_ctime;
        inode->i_blocks         = 0;
-       inode->i_version        = vnode->fid.unique;
+       inode->i_generation     = vnode->fid.unique;
+       inode->i_version        = vnode->status.data_version;
        inode->i_mapping->a_ops = &afs_fs_aops;
 
        /* check to see whether a symbolic link is really a mountpoint */
@@ -100,7 +101,7 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
        struct afs_iget_data *data = opaque;
 
        return inode->i_ino == data->fid.vnode &&
-               inode->i_version == data->fid.unique;
+               inode->i_generation == data->fid.unique;
 }
 
 /*
@@ -122,7 +123,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
        struct afs_vnode *vnode = AFS_FS_I(inode);
 
        inode->i_ino = data->fid.vnode;
-       inode->i_version = data->fid.unique;
+       inode->i_generation = data->fid.unique;
        vnode->fid = data->fid;
        vnode->volume = data->volume;
 
@@ -380,8 +381,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 
        inode = dentry->d_inode;
 
-       _enter("{ ino=%lu v=%llu }", inode->i_ino,
-               (unsigned long long)inode->i_version);
+       _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
 
        generic_fillattr(inode, stat);
        return 0;
index fb240e8766d63f7178374c40594881609201cae4..356dcf0929e8f0de36a166a2054e34c5ef3396c3 100644 (file)
@@ -31,8 +31,8 @@
 static void afs_i_init_once(void *foo);
 static struct dentry *afs_mount(struct file_system_type *fs_type,
                      int flags, const char *dev_name, void *data);
+static void afs_kill_super(struct super_block *sb);
 static struct inode *afs_alloc_inode(struct super_block *sb);
-static void afs_put_super(struct super_block *sb);
 static void afs_destroy_inode(struct inode *inode);
 static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
 
@@ -40,7 +40,7 @@ struct file_system_type afs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "afs",
        .mount          = afs_mount,
-       .kill_sb        = kill_anon_super,
+       .kill_sb        = afs_kill_super,
        .fs_flags       = 0,
 };
 
@@ -50,7 +50,6 @@ static const struct super_operations afs_super_ops = {
        .drop_inode     = afs_drop_inode,
        .destroy_inode  = afs_destroy_inode,
        .evict_inode    = afs_evict_inode,
-       .put_super      = afs_put_super,
        .show_options   = generic_show_options,
 };
 
@@ -282,19 +281,25 @@ static int afs_parse_device_name(struct afs_mount_params *params,
  */
 static int afs_test_super(struct super_block *sb, void *data)
 {
-       struct afs_mount_params *params = data;
+       struct afs_super_info *as1 = data;
        struct afs_super_info *as = sb->s_fs_info;
 
-       return as->volume == params->volume;
+       return as->volume == as1->volume;
+}
+
+static int afs_set_super(struct super_block *sb, void *data)
+{
+       sb->s_fs_info = data;
+       return set_anon_super(sb, NULL);
 }
 
 /*
  * fill in the superblock
  */
-static int afs_fill_super(struct super_block *sb, void *data)
+static int afs_fill_super(struct super_block *sb,
+                         struct afs_mount_params *params)
 {
-       struct afs_mount_params *params = data;
-       struct afs_super_info *as = NULL;
+       struct afs_super_info *as = sb->s_fs_info;
        struct afs_fid fid;
        struct dentry *root = NULL;
        struct inode *inode = NULL;
@@ -302,23 +307,13 @@ static int afs_fill_super(struct super_block *sb, void *data)
 
        _enter("");
 
-       /* allocate a superblock info record */
-       as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
-       if (!as) {
-               _leave(" = -ENOMEM");
-               return -ENOMEM;
-       }
-
-       afs_get_volume(params->volume);
-       as->volume = params->volume;
-
        /* fill in the superblock */
        sb->s_blocksize         = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
        sb->s_magic             = AFS_FS_MAGIC;
        sb->s_op                = &afs_super_ops;
-       sb->s_fs_info           = as;
        sb->s_bdi               = &as->volume->bdi;
+       strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));
 
        /* allocate the root inode and dentry */
        fid.vid         = as->volume->vid;
@@ -326,7 +321,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
        fid.unique      = 1;
        inode = afs_iget(sb, params->key, &fid, NULL, NULL);
        if (IS_ERR(inode))
-               goto error_inode;
+               return PTR_ERR(inode);
 
        if (params->autocell)
                set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
@@ -342,16 +337,8 @@ static int afs_fill_super(struct super_block *sb, void *data)
        _leave(" = 0");
        return 0;
 
-error_inode:
-       ret = PTR_ERR(inode);
-       inode = NULL;
 error:
        iput(inode);
-       afs_put_volume(as->volume);
-       kfree(as);
-
-       sb->s_fs_info = NULL;
-
        _leave(" = %d", ret);
        return ret;
 }
@@ -367,6 +354,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
        struct afs_volume *vol;
        struct key *key;
        char *new_opts = kstrdup(options, GFP_KERNEL);
+       struct afs_super_info *as;
        int ret;
 
        _enter(",,%s,%p", dev_name, options);
@@ -399,12 +387,22 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
                ret = PTR_ERR(vol);
                goto error;
        }
-       params.volume = vol;
+
+       /* allocate a superblock info record */
+       as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+       if (!as) {
+               ret = -ENOMEM;
+               afs_put_volume(vol);
+               goto error;
+       }
+       as->volume = vol;
 
        /* allocate a deviceless superblock */
-       sb = sget(fs_type, afs_test_super, set_anon_super, &params);
+       sb = sget(fs_type, afs_test_super, afs_set_super, as);
        if (IS_ERR(sb)) {
                ret = PTR_ERR(sb);
+               afs_put_volume(vol);
+               kfree(as);
                goto error;
        }
 
@@ -422,16 +420,16 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
        } else {
                _debug("reuse");
                ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+               afs_put_volume(vol);
+               kfree(as);
        }
 
-       afs_put_volume(params.volume);
        afs_put_cell(params.cell);
        kfree(new_opts);
        _leave(" = 0 [%p]", sb);
        return dget(sb->s_root);
 
 error:
-       afs_put_volume(params.volume);
        afs_put_cell(params.cell);
        key_put(params.key);
        kfree(new_opts);
@@ -439,18 +437,12 @@ error:
        return ERR_PTR(ret);
 }
 
-/*
- * finish the unmounting process on the superblock
- */
-static void afs_put_super(struct super_block *sb)
+static void afs_kill_super(struct super_block *sb)
 {
        struct afs_super_info *as = sb->s_fs_info;
-
-       _enter("");
-
+       kill_anon_super(sb);
        afs_put_volume(as->volume);
-
-       _leave("");
+       kfree(as);
 }
 
 /*
index 789b3afb342328dcfe551d39227a81f09ffb3470..b806285ff85304bf71032c4b64cc3fb3cf49dda0 100644 (file)
@@ -84,23 +84,21 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-                        loff_t pos, unsigned len, struct page *page)
+                        loff_t pos, struct page *page)
 {
        loff_t i_size;
-       unsigned eof;
        int ret;
+       int len;
 
-       _enter(",,%llu,%u", (unsigned long long)pos, len);
-
-       ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
+       _enter(",,%llu", (unsigned long long)pos);
 
        i_size = i_size_read(&vnode->vfs_inode);
-       if (pos + len > i_size)
-               eof = i_size;
+       if (pos + PAGE_CACHE_SIZE > i_size)
+               len = i_size - pos;
        else
-               eof = PAGE_CACHE_SIZE;
+               len = PAGE_CACHE_SIZE;
 
-       ret = afs_vnode_fetch_data(vnode, key, 0, eof, page);
+       ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
@@ -153,9 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        *pagep = page;
        /* page won't leak in error case: it eventually gets cleaned off LRU */
 
-       if (!PageUptodate(page)) {
-               _debug("not up to date");
-               ret = afs_fill_page(vnode, key, pos, len, page);
+       if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
+               ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
                if (ret < 0) {
                        kfree(candidate);
                        _leave(" = %d [prep]", ret);
index 1f2b1997833340fc4e80b0c5a3c115a7f949fd15..1a2421f908f0a471f028d7e0f320caeed18592bd 100644 (file)
@@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
                 * individual writeable reference is too fragile given the
                 * way @mode is used in blkdev_get/put().
                 */
-               if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
-                   !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+               if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
+                   (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
                        bdev->bd_write_holder = true;
                        disk_block_events(disk);
                }
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 93b1aa9320143e921a31dde1d0586529ce75ea8e..52d7eca8c7bfe9d599e60cd0918b36c7f5fefe42 100644 (file)
@@ -121,9 +121,6 @@ struct btrfs_inode {
         */
        u64 index_cnt;
 
-       /* the start of block group preferred for allocations. */
-       u64 block_group;
-
        /* the fsync log has some corner cases that mean we have to check
         * directories to see if any unlinks have been done before
         * the directory was logged.  See tree-log.c for all the
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b0e18d986e0ac37f432dbad1141aea714d34f6a0..2e667868e0d2b8b75649572d10602ac56679a0d0 100644 (file)
@@ -43,8 +43,6 @@ struct btrfs_path *btrfs_alloc_path(void)
 {
        struct btrfs_path *path;
        path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
-       if (path)
-               path->reada = 1;
        return path;
 }
 
@@ -1224,11 +1222,13 @@ static void reada_for_search(struct btrfs_root *root,
        u64 search;
        u64 target;
        u64 nread = 0;
+       u64 gen;
        int direction = path->reada;
        struct extent_buffer *eb;
        u32 nr;
        u32 blocksize;
        u32 nscan = 0;
+       bool map = true;
 
        if (level != 1)
                return;
@@ -1250,7 +1250,19 @@ static void reada_for_search(struct btrfs_root *root,
 
        nritems = btrfs_header_nritems(node);
        nr = slot;
+       if (node->map_token || path->skip_locking)
+               map = false;
+
        while (1) {
+               if (map && !node->map_token) {
+                       unsigned long offset = btrfs_node_key_ptr_offset(nr);
+                       map_private_extent_buffer(node, offset,
+                                                 sizeof(struct btrfs_key_ptr),
+                                                 &node->map_token,
+                                                 &node->kaddr,
+                                                 &node->map_start,
+                                                 &node->map_len, KM_USER1);
+               }
                if (direction < 0) {
                        if (nr == 0)
                                break;
@@ -1268,14 +1280,23 @@ static void reada_for_search(struct btrfs_root *root,
                search = btrfs_node_blockptr(node, nr);
                if ((search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
-                       readahead_tree_block(root, search, blocksize,
-                                    btrfs_node_ptr_generation(node, nr));
+                       gen = btrfs_node_ptr_generation(node, nr);
+                       if (map && node->map_token) {
+                               unmap_extent_buffer(node, node->map_token,
+                                                   KM_USER1);
+                               node->map_token = NULL;
+                       }
+                       readahead_tree_block(root, search, blocksize, gen);
                        nread += blocksize;
                }
                nscan++;
                if ((nread > 65536 || nscan > 32))
                        break;
        }
+       if (map && node->map_token) {
+               unmap_extent_buffer(node, node->map_token, KM_USER1);
+               node->map_token = NULL;
+       }
 }
 
 /*
@@ -1648,9 +1669,6 @@ again:
                }
 cow_done:
                BUG_ON(!cow && ins_len);
-               if (level != btrfs_header_level(b))
-                       WARN_ON(1);
-               level = btrfs_header_level(b);
 
                p->nodes[level] = b;
                if (!p->skip_locking)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c093fa98f61f45f4d98344fab0e52de088a8aa2..378b5b4443f3a6991e1313d4009b89160d16539e 100644 (file)
@@ -930,7 +930,6 @@ struct btrfs_fs_info {
         * is required instead of the faster short fsync log commits
         */
        u64 last_trans_log_full_commit;
-       u64 open_ioctl_trans;
        unsigned long mount_opt:20;
        unsigned long compress_type:4;
        u64 max_inline;
@@ -947,7 +946,6 @@ struct btrfs_fs_info {
        struct super_block *sb;
        struct inode *btree_inode;
        struct backing_dev_info bdi;
-       struct mutex trans_mutex;
        struct mutex tree_log_mutex;
        struct mutex transaction_kthread_mutex;
        struct mutex cleaner_mutex;
@@ -968,6 +966,7 @@ struct btrfs_fs_info {
        struct rw_semaphore subvol_sem;
        struct srcu_struct subvol_srcu;
 
+       spinlock_t trans_lock;
        struct list_head trans_list;
        struct list_head hashers;
        struct list_head dead_roots;
@@ -980,6 +979,7 @@ struct btrfs_fs_info {
        atomic_t async_submit_draining;
        atomic_t nr_async_bios;
        atomic_t async_delalloc_pages;
+       atomic_t open_ioctl_trans;
 
        /*
         * this is used by the balancing code to wait for all the pending
@@ -1044,6 +1044,7 @@ struct btrfs_fs_info {
        int closing;
        int log_root_recovering;
        int enospc_unlink;
+       int trans_no_join;
 
        u64 total_pinned;
 
@@ -1065,7 +1066,6 @@ struct btrfs_fs_info {
        struct reloc_control *reloc_ctl;
 
        spinlock_t delalloc_lock;
-       spinlock_t new_trans_lock;
        u64 delalloc_bytes;
 
        /* data_alloc_cluster is only used in ssd mode */
@@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
 #define BTRFS_MOUNT_ENOSPC_DEBUG        (1 << 15)
 #define BTRFS_MOUNT_AUTO_DEFRAG                (1 << 16)
+#define BTRFS_MOUNT_INODE_MAP_CACHE    (1 << 17)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2238,6 +2239,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 void btrfs_block_rsv_release(struct btrfs_root *root,
                             struct btrfs_block_rsv *block_rsv,
                             u64 num_bytes);
+int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   struct btrfs_block_rsv *rsv);
 int btrfs_set_block_group_ro(struct btrfs_root *root,
                             struct btrfs_block_group_cache *cache);
 int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2350,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct extent_buffer *node,
                        struct extent_buffer *parent);
+static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
+{
+       /*
+        * Get synced with close_ctree()
+        */
+       smp_mb();
+       return fs_info->closing;
+}
+
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
                        struct btrfs_path *path,
@@ -2512,8 +2525,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 int btrfs_writepages(struct address_space *mapping,
                     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *new_root,
-                            u64 new_dirid, u64 alloc_hint);
+                            struct btrfs_root *new_root, u64 new_dirid);
 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio, unsigned long bio_flags);
 
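The new BTRFS_MOUNT_INODE_MAP_CACHE bit added above is consumed through the existing btrfs_set_opt()/btrfs_clear_opt()/btrfs_test_opt() token-pasting macros shown in the same hunk. A minimal, self-contained sketch of that pattern follows; the values mirror the diff, but btrfs_test_opt_word() and main() are invented here for illustration and are not kernel code (the real btrfs_test_opt() takes a root and reads fs_info->mount_opt).

/* Self-contained sketch of the mount-option bit pattern; not kernel code. */
#include <stdio.h>

#define BTRFS_MOUNT_INODE_MAP_CACHE    (1 << 17)

#define btrfs_clear_opt(o, opt)        ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
/* illustrative stand-in for btrfs_test_opt(), operating on a plain word */
#define btrfs_test_opt_word(o, opt)    ((o) & BTRFS_MOUNT_##opt)

int main(void)
{
        unsigned long mount_opt = 0;

        btrfs_set_opt(mount_opt, INODE_MAP_CACHE);
        printf("inode_map_cache: %s\n",
               btrfs_test_opt_word(mount_opt, INODE_MAP_CACHE) ? "on" : "off");

        btrfs_clear_opt(mount_opt, INODE_MAP_CACHE);
        printf("inode_map_cache: %s\n",
               btrfs_test_opt_word(mount_opt, INODE_MAP_CACHE) ? "on" : "off");
        return 0;
}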
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 01e29503a54bd0aae2a972ba04284b94afcb8489..6462c29d2d37fcc8ec779f1d6d3b6817003a68c2 100644 (file)
@@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
        INIT_LIST_HEAD(&head);
 
        next = item;
+       nitems = 0;
 
        /*
         * count the number of the continuous items that we can insert in batch
@@ -1129,7 +1130,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
        delayed_node = async_node->delayed_node;
        root = delayed_node->root;
 
-       trans = btrfs_join_transaction(root, 0);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto free_path;
 
@@ -1572,8 +1573,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
-       btrfs_set_stack_inode_block_group(inode_item,
-                                         BTRFS_I(inode)->block_group);
+       btrfs_set_stack_inode_block_group(inode_item, 0);
 
        btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
                                     inode->i_atime.tv_sec);
@@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
 {
        struct btrfs_delayed_node *delayed_node;
-       int ret;
+       int ret = 0;
 
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 98b6a71decba105616ef5727b07b18c6d79dd8e0..9f68c68986535fbdbd21f01abad64906e19fde68 100644 (file)
@@ -1505,24 +1505,24 @@ static int transaction_kthread(void *arg)
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);
 
-               spin_lock(&root->fs_info->new_trans_lock);
+               spin_lock(&root->fs_info->trans_lock);
                cur = root->fs_info->running_transaction;
                if (!cur) {
-                       spin_unlock(&root->fs_info->new_trans_lock);
+                       spin_unlock(&root->fs_info->trans_lock);
                        goto sleep;
                }
 
                now = get_seconds();
                if (!cur->blocked &&
                    (now < cur->start_time || now - cur->start_time < 30)) {
-                       spin_unlock(&root->fs_info->new_trans_lock);
+                       spin_unlock(&root->fs_info->trans_lock);
                        delay = HZ * 5;
                        goto sleep;
                }
                transid = cur->transid;
-               spin_unlock(&root->fs_info->new_trans_lock);
+               spin_unlock(&root->fs_info->trans_lock);
 
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
                if (transid == trans->transid) {
                        ret = btrfs_commit_transaction(trans, root);
@@ -1613,7 +1613,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        INIT_LIST_HEAD(&fs_info->ordered_operations);
        INIT_LIST_HEAD(&fs_info->caching_block_groups);
        spin_lock_init(&fs_info->delalloc_lock);
-       spin_lock_init(&fs_info->new_trans_lock);
+       spin_lock_init(&fs_info->trans_lock);
        spin_lock_init(&fs_info->ref_cache_lock);
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
@@ -1645,6 +1645,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
+       fs_info->trans_no_join = 0;
 
        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);
@@ -1667,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        init_waitqueue_head(&fs_info->scrub_pause_wait);
        init_rwsem(&fs_info->scrub_super_lock);
        fs_info->scrub_workers_refcnt = 0;
-       btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-                          fs_info->thread_pool_size, &fs_info->generic_worker);
 
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
@@ -1709,7 +1708,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->do_barriers = 1;
 
 
-       mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->ordered_operations_mutex);
        mutex_init(&fs_info->tree_log_mutex);
        mutex_init(&fs_info->chunk_mutex);
@@ -2479,13 +2477,13 @@ int btrfs_commit_super(struct btrfs_root *root)
        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);
 
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        BUG_ON(ret);
        /* run commit again to drop the original snapshot */
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        btrfs_commit_transaction(trans, root);
@@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 
        INIT_LIST_HEAD(&splice);
 
-       list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
        spin_lock(&root->fs_info->delalloc_lock);
+       list_splice_init(&root->fs_info->delalloc_inodes, &splice);
 
        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
@@ -3024,10 +3021,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 
        WARN_ON(1);
 
-       mutex_lock(&root->fs_info->trans_mutex);
        mutex_lock(&root->fs_info->transaction_kthread_mutex);
 
+       spin_lock(&root->fs_info->trans_lock);
        list_splice_init(&root->fs_info->trans_list, &list);
+       root->fs_info->trans_no_join = 1;
+       spin_unlock(&root->fs_info->trans_lock);
+
        while (!list_empty(&list)) {
                t = list_entry(list.next, struct btrfs_transaction, list);
                if (!t)
@@ -3052,23 +3052,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
                t->blocked = 0;
                if (waitqueue_active(&root->fs_info->transaction_wait))
                        wake_up(&root->fs_info->transaction_wait);
-               mutex_unlock(&root->fs_info->trans_mutex);
 
-               mutex_lock(&root->fs_info->trans_mutex);
                t->commit_done = 1;
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);
-               mutex_unlock(&root->fs_info->trans_mutex);
-
-               mutex_lock(&root->fs_info->trans_mutex);
 
                btrfs_destroy_pending_snapshots(t);
 
                btrfs_destroy_delalloc_inodes(root);
 
-               spin_lock(&root->fs_info->new_trans_lock);
+               spin_lock(&root->fs_info->trans_lock);
                root->fs_info->running_transaction = NULL;
-               spin_unlock(&root->fs_info->new_trans_lock);
+               spin_unlock(&root->fs_info->trans_lock);
 
                btrfs_destroy_marked_extents(root, &t->dirty_pages,
                                             EXTENT_DIRTY);
@@ -3082,8 +3077,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
                kmem_cache_free(btrfs_transaction_cachep, t);
        }
 
+       spin_lock(&root->fs_info->trans_lock);
+       root->fs_info->trans_no_join = 0;
+       spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->transaction_kthread_mutex);
-       mutex_unlock(&root->fs_info->trans_mutex);
 
        return 0;
 }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 169bd62ce776257e72badbb2f16f4dbb1d0eaa56..b42efc2ded513ec10c38eb7ebcee9247d2f2c825 100644 (file)
@@ -348,7 +348,7 @@ static int caching_kthread(void *data)
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
-       path->reada = 2;
+       path->reada = 1;
 
        key.objectid = last;
        key.offset = 0;
@@ -366,8 +366,7 @@ again:
        nritems = btrfs_header_nritems(leaf);
 
        while (1) {
-               smp_mb();
-               if (fs_info->closing > 1) {
+               if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }
@@ -379,15 +378,18 @@ again:
                        if (ret)
                                break;
 
-                       caching_ctl->progress = last;
-                       btrfs_release_path(path);
-                       up_read(&fs_info->extent_commit_sem);
-                       mutex_unlock(&caching_ctl->mutex);
-                       if (btrfs_transaction_in_commit(fs_info))
-                               schedule_timeout(1);
-                       else
+                       if (need_resched() ||
+                           btrfs_next_leaf(extent_root, path)) {
+                               caching_ctl->progress = last;
+                               btrfs_release_path(path);
+                               up_read(&fs_info->extent_commit_sem);
+                               mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
-                       goto again;
+                               goto again;
+                       }
+                       leaf = path->nodes[0];
+                       nritems = btrfs_header_nritems(leaf);
+                       continue;
                }
 
                if (key.objectid < block_group->key.objectid) {
@@ -3065,7 +3067,7 @@ again:
                        spin_unlock(&data_sinfo->lock);
 alloc:
                        alloc_target = btrfs_get_alloc_profile(root, 1);
-                       trans = btrfs_join_transaction(root, 1);
+                       trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
 
@@ -3087,13 +3089,21 @@ alloc:
                        }
                        goto again;
                }
+
+               /*
+                * If we have less pinned bytes than we want to allocate then
+                * don't bother committing the transaction, it won't help us.
+                */
+               if (data_sinfo->bytes_pinned < bytes)
+                       committed = 1;
                spin_unlock(&data_sinfo->lock);
 
                /* commit the current transaction and try again */
 commit_trans:
-               if (!committed && !root->fs_info->open_ioctl_trans) {
+               if (!committed &&
+                   !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        committed = 1;
-                       trans = btrfs_join_transaction(root, 1);
+                       trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
                        ret = btrfs_commit_transaction(trans, root);
@@ -3472,7 +3482,7 @@ again:
                goto out;
 
        ret = -ENOSPC;
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto out;
        ret = btrfs_commit_transaction(trans, root);
@@ -3699,7 +3709,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
                if (trans)
                        return -EAGAIN;
 
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
                ret = btrfs_commit_transaction(trans, root);
                return 0;
@@ -3837,6 +3847,37 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
 }
 
+int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   struct btrfs_block_rsv *rsv)
+{
+       struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
+       u64 num_bytes;
+       int ret;
+
+       /*
+        * Truncate should be freeing data, but give us 2 items just in case it
+        * needs to use some space.  We may want to be smarter about this in the
+        * future.
+        */
+       num_bytes = btrfs_calc_trans_metadata_size(root, 2);
+
+       /* We already have enough bytes, just return */
+       if (rsv->reserved >= num_bytes)
+               return 0;
+
+       num_bytes -= rsv->reserved;
+
+       /*
+        * You should have reserved enough space before hand to do this, so this
+        * should not fail.
+        */
+       ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
+       BUG_ON(ret);
+
+       return 0;
+}
+
 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 int num_items)
@@ -3877,23 +3918,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
 
        /*
-        * one for deleting orphan item, one for updating inode and
-        * two for calling btrfs_truncate_inode_items.
-        *
-        * btrfs_truncate_inode_items is a delete operation, it frees
-        * more space than it uses in most cases. So two units of
-        * metadata space should be enough for calling it many times.
-        * If all of the metadata space is used, we can commit
-        * transaction and use space it freed.
+        * We need to hold space in order to delete our orphan item once we've
+        * added it, so this takes the reservation so we can release it later
+        * when we are truly done with the orphan item.
         */
-       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -4987,6 +5023,15 @@ have_block_group:
                if (unlikely(block_group->ro))
                        goto loop;
 
+               spin_lock(&block_group->free_space_ctl->tree_lock);
+               if (cached &&
+                   block_group->free_space_ctl->free_space <
+                   num_bytes + empty_size) {
+                       spin_unlock(&block_group->free_space_ctl->tree_lock);
+                       goto loop;
+               }
+               spin_unlock(&block_group->free_space_ctl->tree_lock);
+
                /*
                 * Ok we want to try and use the cluster allocator, so lets look
                 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
@@ -5150,6 +5195,7 @@ checks:
                        btrfs_add_free_space(block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
@@ -5172,9 +5218,7 @@ loop:
         * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
         *                      again
         */
-       if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
-           (found_uncached_bg || empty_size || empty_cluster ||
-            allowed_chunk_alloc)) {
+       if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
                index = 0;
                if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
                        found_uncached_bg = false;
@@ -5214,42 +5258,39 @@ loop:
                        goto search;
                }
 
-               if (loop < LOOP_CACHING_WAIT) {
-                       loop++;
-                       goto search;
-               }
+               loop++;
 
                if (loop == LOOP_ALLOC_CHUNK) {
-                       empty_size = 0;
-                       empty_cluster = 0;
-               }
+                      if (allowed_chunk_alloc) {
+                               ret = do_chunk_alloc(trans, root, num_bytes +
+                                                    2 * 1024 * 1024, data,
+                                                    CHUNK_ALLOC_LIMITED);
+                               allowed_chunk_alloc = 0;
+                               if (ret == 1)
+                                       done_chunk_alloc = 1;
+                       } else if (!done_chunk_alloc &&
+                                  space_info->force_alloc ==
+                                  CHUNK_ALLOC_NO_FORCE) {
+                               space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+                       }
 
-               if (allowed_chunk_alloc) {
-                       ret = do_chunk_alloc(trans, root, num_bytes +
-                                            2 * 1024 * 1024, data,
-                                            CHUNK_ALLOC_LIMITED);
-                       allowed_chunk_alloc = 0;
-                       done_chunk_alloc = 1;
-               } else if (!done_chunk_alloc &&
-                          space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
-                       space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+                      /*
+                       * We didn't allocate a chunk, go ahead and drop the
+                       * empty size and loop again.
+                       */
+                      if (!done_chunk_alloc)
+                              loop = LOOP_NO_EMPTY_SIZE;
                }
 
-               if (loop < LOOP_NO_EMPTY_SIZE) {
-                       loop++;
-                       goto search;
+               if (loop == LOOP_NO_EMPTY_SIZE) {
+                       empty_size = 0;
+                       empty_cluster = 0;
                }
-               ret = -ENOSPC;
+
+               goto search;
        } else if (!ins->objectid) {
                ret = -ENOSPC;
-       }
-
-       /* we found what we needed */
-       if (ins->objectid) {
-               if (!(data & BTRFS_BLOCK_GROUP_DATA))
-                       trans->block_group = block_group->key.objectid;
-
-               btrfs_put_block_group(block_group);
+       } else if (ins->objectid) {
                ret = 0;
        }
 
@@ -6526,7 +6567,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
 
        BUG_ON(cache->ro);
 
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
        alloc_flags = update_block_group_flags(root, cache->flags);
@@ -6882,6 +6923,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+       path->reada = 1;
 
        cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
        if (cache_gen != 0 &&
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c5d9fbb92bc31b50ec9c7e2fd955a267661afe2d..7055d11c1efdd2efef6668b18e0dfcae802a9dd7 100644 (file)
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
                        if (total_bytes >= max_bytes)
                                break;
                        if (!found) {
-                               *start = state->start;
+                               *start = max(cur_start, state->start);
                                found = 1;
                        }
                        last = state->end;
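The count_range_bits() change above clamps the reported start of the found range to the queried start, so a matching extent state that begins before cur_start no longer skews the result. A standalone sketch of that clamp follows; the struct state array and find_first_start() are invented for illustration and are not btrfs code.

/* Standalone sketch of the start-clamping fix; not btrfs code. */
#include <stdio.h>
#include <stdint.h>

struct state { uint64_t start, end; };          /* inclusive range */

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* report where the counted region really begins inside [cur_start, ...) */
static uint64_t find_first_start(const struct state *states, int n,
                                 uint64_t cur_start)
{
        for (int i = 0; i < n; i++) {
                if (states[i].end < cur_start)
                        continue;
                /* the old code returned states[i].start here, which may lie
                 * before cur_start; the fix clamps it to the query */
                return max_u64(cur_start, states[i].start);
        }
        return cur_start;
}

int main(void)
{
        struct state states[] = { { 0, 8191 }, { 8192, 16383 } };

        /* query starts in the middle of the first range: prints 4096 */
        printf("reported start: %llu\n",
               (unsigned long long)find_first_start(states, 2, 4096));
        return 0;
}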
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a4757c0a14a991fc1ca037411e0426c94e..a11a92ee2d30a84ccf52284e704c5cc971f7126e 100644 (file)
@@ -126,9 +126,9 @@ struct extent_buffer {
        unsigned long map_len;
        struct page *first_page;
        unsigned long bflags;
-       atomic_t refs;
        struct list_head leak_list;
        struct rcu_head rcu_head;
+       atomic_t refs;
 
        /* the spinlock is used to protect most operations */
        spinlock_t lock;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c6a22d783c35576253d96f1c3380b01d949f9eca..fa4ef18b66b150a975d4143288b1de699dc0ecf0 100644 (file)
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;
 
-       if (root->fs_info->closing)
+       if (btrfs_fs_closing(root->fs_info))
                return 0;
 
        if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        if (!defrag)
                return -ENOMEM;
 
-       defrag->ino = inode->i_ino;
+       defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;
 
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
                first_ino = defrag->ino + 1;
                rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
 
-               if (fs_info->closing)
+               if (btrfs_fs_closing(fs_info))
                        goto next_free;
 
                spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,14 +1480,12 @@ int btrfs_sync_file(struct file *file, int datasync)
         * the current transaction, we can bail out now without any
         * syncing
         */
-       mutex_lock(&root->fs_info->trans_mutex);
+       smp_mb();
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
-               mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
-       mutex_unlock(&root->fs_info->trans_mutex);
 
        /*
         * ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758e63cd6303327245e80aeffcfa7ae..9f985a429877fdd5d04100514f3ff4a5f2abeb18 100644 (file)
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
                return inode;
 
        spin_lock(&block_group->lock);
-       if (!root->fs_info->closing) {
+       if (!btrfs_fs_closing(root->fs_info)) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
        }
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        pgoff_t index = 0;
        unsigned long first_page_offset;
        int num_checksums;
-       int ret = 0, ret2;
+       int ret = 0;
 
        INIT_LIST_HEAD(&bitmaps);
 
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                                spin_lock(&ctl->tree_lock);
                                ret = link_free_space(ctl, e);
                                spin_unlock(&ctl->tree_lock);
-                               BUG_ON(ret);
+                               if (ret) {
+                                       printk(KERN_ERR "Duplicate entries in "
+                                              "free space cache, dumping\n");
+                                       kunmap(page);
+                                       unlock_page(page);
+                                       page_cache_release(page);
+                                       goto free_cache;
+                               }
                        } else {
                                e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                                if (!e->bitmap) {
@@ -414,10 +421,18 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                                        goto free_cache;
                                }
                                spin_lock(&ctl->tree_lock);
-                               ret2 = link_free_space(ctl, e);
+                               ret = link_free_space(ctl, e);
                                ctl->total_bitmaps++;
                                ctl->op->recalc_thresholds(ctl);
                                spin_unlock(&ctl->tree_lock);
+                               if (ret) {
+                                       printk(KERN_ERR "Duplicate entries in "
+                                              "free space cache, dumping\n");
+                                       kunmap(page);
+                                       unlock_page(page);
+                                       page_cache_release(page);
+                                       goto free_cache;
+                               }
                                list_add_tail(&e->list, &bitmaps);
                        }
 
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
         * If we're unmounting then just return, since this does a search on the
         * normal root and not the commit root and we could deadlock.
         */
-       smp_mb();
-       if (fs_info->closing)
+       if (btrfs_fs_closing(fs_info))
                return 0;
 
        /*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
        num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
+
+       /* Since the first page has all of our checksums and our generation we
+        * need to calculate the offset into the page that we can start writing
+        * our entries.
+        */
+       first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
+
        filemap_write_and_wait(inode->i_mapping);
        btrfs_wait_ordered_range(inode, inode->i_size &
                                 ~(root->sectorsize - 1), (u64)-1);
 
+       /* make sure we don't overflow that first page */
+       if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
+               /* this is really the same as running out of space, where we also return 0 */
+               printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
+               ret = 0;
+               goto out_update;
+       }
+
        /* We need a checksum per page. */
        crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
        if (!crc)
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                return -1;
        }
 
-       /* Since the first page has all of our checksums and our generation we
-        * need to calculate the offset into the page that we can start writing
-        * our entries.
-        */
-       first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
-
        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        ret = 1;
 
 out_free:
+       kfree(checksums);
+       kfree(pages);
+
+out_update:
        if (ret != 1) {
                invalidate_inode_pages2_range(inode->i_mapping, 0, index);
                BTRFS_I(inode)->generation = 0;
        }
-       kfree(checksums);
-       kfree(pages);
        btrfs_update_inode(trans, root, inode);
        return ret;
 }
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
                         * logically.
                         */
                        if (bitmap) {
-                               WARN_ON(info->bitmap);
+                               if (info->bitmap) {
+                                       WARN_ON_ONCE(1);
+                                       return -EEXIST;
+                               }
                                p = &(*p)->rb_right;
                        } else {
-                               WARN_ON(!info->bitmap);
+                               if (!info->bitmap) {
+                                       WARN_ON_ONCE(1);
+                                       return -EEXIST;
+                               }
                                p = &(*p)->rb_left;
                        }
                }
@@ -1386,6 +1417,23 @@ again:
        return 0;
 }
 
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+                              struct btrfs_free_space *info, u64 offset,
+                              u64 bytes)
+{
+       u64 bytes_to_set = 0;
+       u64 end;
+
+       end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+       bytes_to_set = min(end - offset, bytes);
+
+       bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+       return bytes_to_set;
+
+}
+
 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
                      struct btrfs_free_space *info)
 {
@@ -1422,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
        return true;
 }
 
+static struct btrfs_free_space_op free_space_op = {
+       .recalc_thresholds      = recalculate_thresholds,
+       .use_bitmap             = use_bitmap,
+};
+
 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
 {
        struct btrfs_free_space *bitmap_info;
+       struct btrfs_block_group_cache *block_group = NULL;
        int added = 0;
-       u64 bytes, offset, end;
+       u64 bytes, offset, bytes_added;
        int ret;
 
        bytes = info->bytes;
@@ -1436,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
        if (!ctl->op->use_bitmap(ctl, info))
                return 0;
 
+       if (ctl->op == &free_space_op)
+               block_group = ctl->private;
 again:
+       /*
+        * Since we link bitmaps right into the cluster we need to see if we
+        * have a cluster here, and if so and it has our bitmap we need to add
+        * the free space to that bitmap.
+        */
+       if (block_group && !list_empty(&block_group->cluster_list)) {
+               struct btrfs_free_cluster *cluster;
+               struct rb_node *node;
+               struct btrfs_free_space *entry;
+
+               cluster = list_entry(block_group->cluster_list.next,
+                                    struct btrfs_free_cluster,
+                                    block_group_list);
+               spin_lock(&cluster->lock);
+               node = rb_first(&cluster->root);
+               if (!node) {
+                       spin_unlock(&cluster->lock);
+                       goto no_cluster_bitmap;
+               }
+
+               entry = rb_entry(node, struct btrfs_free_space, offset_index);
+               if (!entry->bitmap) {
+                       spin_unlock(&cluster->lock);
+                       goto no_cluster_bitmap;
+               }
+
+               if (entry->offset == offset_to_bitmap(ctl, offset)) {
+                       bytes_added = add_bytes_to_bitmap(ctl, entry,
+                                                         offset, bytes);
+                       bytes -= bytes_added;
+                       offset += bytes_added;
+               }
+               spin_unlock(&cluster->lock);
+               if (!bytes) {
+                       ret = 1;
+                       goto out;
+               }
+       }
+
+no_cluster_bitmap:
        bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                         1, 0);
        if (!bitmap_info) {
@@ -1444,19 +1540,10 @@ again:
                goto new_bitmap;
        }
 
-       end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
-       if (offset >= bitmap_info->offset && offset + bytes > end) {
-               bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
-               bytes -= end - offset;
-               offset = end;
-               added = 0;
-       } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-               bitmap_set_bits(ctl, bitmap_info, offset, bytes);
-               bytes = 0;
-       } else {
-               BUG();
-       }
+       bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+       bytes -= bytes_added;
+       offset += bytes_added;
+       added = 0;
 
        if (!bytes) {
                ret = 1;
@@ -1735,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
               "\n", count);
 }
 
-static struct btrfs_free_space_op free_space_op = {
-       .recalc_thresholds      = recalculate_thresholds,
-       .use_bitmap             = use_bitmap,
-};
-
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2111,9 +2193,11 @@ again:
 /*
  * This searches the block group for just extents to fill the cluster with.
  */
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
-                                  struct btrfs_free_cluster *cluster,
-                                  u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+                       struct btrfs_free_cluster *cluster,
+                       struct list_head *bitmaps, u64 offset, u64 bytes,
+                       u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
@@ -2135,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
         * extent entry.
         */
        while (entry->bitmap) {
+               if (list_empty(&entry->list))
+                       list_add_tail(&entry->list, bitmaps);
                node = rb_next(&entry->offset_index);
                if (!node)
                        return -ENOSPC;
@@ -2154,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        return -ENOSPC;
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
-               if (entry->bitmap)
+               if (entry->bitmap) {
+                       if (list_empty(&entry->list))
+                               list_add_tail(&entry->list, bitmaps);
                        continue;
+               }
+
                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
@@ -2207,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
  * This specifically looks for bitmaps that may work in the cluster, we assume
  * that we have already failed to find extents that will work.
  */
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
-                               struct btrfs_free_cluster *cluster,
-                               u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+                    struct btrfs_free_cluster *cluster,
+                    struct list_head *bitmaps, u64 offset, u64 bytes,
+                    u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
@@ -2219,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
        if (ctl->total_bitmaps == 0)
                return -ENOSPC;
 
+       /*
+        * First check our cached list of bitmaps and see if there is an entry
+        * here that will work.
+        */
+       list_for_each_entry(entry, bitmaps, list) {
+               if (entry->bytes < min_bytes)
+                       continue;
+               ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+                                          bytes, min_bytes);
+               if (!ret)
+                       return 0;
+       }
+
+       /*
+        * If we do have entries on our list and we are here then we didn't find
+        * anything, so go ahead and get the next entry after the last entry in
+        * this list and start the search from there.
+        */
+       if (!list_empty(bitmaps)) {
+               entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+                                  list);
+               node = rb_next(&entry->offset_index);
+               if (!node)
+                       return -ENOSPC;
+               entry = rb_entry(node, struct btrfs_free_space, offset_index);
+               goto search;
+       }
+
        entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
        if (!entry)
                return -ENOSPC;
 
+search:
        node = &entry->offset_index;
        do {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2253,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             u64 offset, u64 bytes, u64 empty_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct list_head bitmaps;
+       struct btrfs_free_space *entry, *tmp;
        u64 min_bytes;
        int ret;
 
@@ -2291,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                goto out;
        }
 
-       ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
-                                     min_bytes);
+       INIT_LIST_HEAD(&bitmaps);
+       ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+                                     bytes, min_bytes);
        if (ret)
-               ret = setup_cluster_bitmap(block_group, cluster, offset,
-                                          bytes, min_bytes);
+               ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+                                          offset, bytes, min_bytes);
+
+       /* Clear our temporary list */
+       list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+               list_del_init(&entry->list);
 
        if (!ret) {
                atomic_inc(&block_group->count);
@@ -2481,7 +2609,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
                return inode;
 
        spin_lock(&root->cache_lock);
-       if (!root->fs_info->closing)
+       if (!btrfs_fs_closing(root->fs_info))
                root->cache_inode = igrab(inode);
        spin_unlock(&root->cache_lock);
 
@@ -2504,12 +2632,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        int ret = 0;
        u64 root_gen = btrfs_root_generation(&root->root_item);
 
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return 0;
+
        /*
         * If we're unmounting then just return, since this does a search on the
         * normal root and not the commit root and we could deadlock.
         */
-       smp_mb();
-       if (fs_info->closing)
+       if (btrfs_fs_closing(fs_info))
                return 0;
 
        path = btrfs_alloc_path();
@@ -2543,6 +2673,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
        struct inode *inode;
        int ret;
 
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return 0;
+
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode))
                return 0;
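The insert_into_bitmap()/add_bytes_to_bitmap() rework above spreads a free range across fixed-size bitmaps by clamping each chunk to the end of the bitmap that covers the current offset and then advancing offset/bytes until nothing is left. A self-contained sketch of that loop follows; the unit and bitmap sizes are illustrative stand-ins, and the printf-based "bitmap" is not the kernel's data structure.

/* Standalone sketch of the bitmap clamping loop; not btrfs code. */
#include <stdio.h>
#include <stdint.h>

#define UNIT            4096ULL                 /* bytes per bit (illustrative) */
#define BITS_PER_BITMAP 32768ULL                /* bits in one bitmap (illustrative) */
#define BITMAP_BYTES    (UNIT * BITS_PER_BITMAP)

/* round an offset down to the start of the bitmap that covers it */
static uint64_t offset_to_bitmap(uint64_t offset)
{
        return offset - (offset % BITMAP_BYTES);
}

/* set at most the bytes that fit in the bitmap starting at bitmap_start,
 * return how many bytes were actually consumed */
static uint64_t add_bytes_to_bitmap(uint64_t bitmap_start, uint64_t offset,
                                    uint64_t bytes)
{
        uint64_t end = bitmap_start + BITMAP_BYTES;
        uint64_t bytes_to_set = (end - offset < bytes) ? end - offset : bytes;

        printf("bitmap @%llu: mark [%llu, %llu)\n",
               (unsigned long long)bitmap_start,
               (unsigned long long)offset,
               (unsigned long long)(offset + bytes_to_set));
        return bytes_to_set;
}

int main(void)
{
        uint64_t offset = 3 * BITMAP_BYTES - 2 * UNIT;  /* straddles a boundary */
        uint64_t bytes  = 8 * UNIT;

        while (bytes) {
                uint64_t added = add_bytes_to_bitmap(offset_to_bitmap(offset),
                                                     offset, bytes);
                bytes  -= added;
                offset += added;
        }
        return 0;
}

Running it shows two calls: the first clamps to the end of the covering bitmap, the second picks up the remainder in the next one, mirroring the "bytes -= bytes_added; offset += bytes_added" flow in the patch.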
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 3262cd17a12f89192ce0c3e5394d5936ce003049..b4087e0fa8714bca45e5fc026caf3491e212dd14 100644 (file)
@@ -38,6 +38,9 @@ static int caching_kthread(void *data)
        int slot;
        int ret;
 
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return 0;
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -59,8 +62,7 @@ again:
                goto out;
 
        while (1) {
-               smp_mb();
-               if (fs_info->closing)
+               if (btrfs_fs_closing(fs_info))
                        goto out;
 
                leaf = path->nodes[0];
@@ -141,6 +143,9 @@ static void start_caching(struct btrfs_root *root)
        int ret;
        u64 objectid;
 
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return;
+
        spin_lock(&root->cache_lock);
        if (root->cached != BTRFS_CACHE_NO) {
                spin_unlock(&root->cache_lock);
@@ -178,6 +183,9 @@ static void start_caching(struct btrfs_root *root)
 
 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
 {
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return btrfs_find_free_objectid(root, objectid);
+
 again:
        *objectid = btrfs_find_ino_for_alloc(root);
 
@@ -201,6 +209,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 {
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return;
+
 again:
        if (root->cached == BTRFS_CACHE_FINISHED) {
                __btrfs_add_free_space(ctl, objectid, 1);
@@ -250,6 +262,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
        struct rb_node *n;
        u64 count;
 
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return;
+
        while (1) {
                n = rb_first(rbroot);
                if (!n)
@@ -388,9 +403,24 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
        int prealloc;
        bool retry = false;
 
+       /* only fs tree and subvol/snap needs ino cache */
+       if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
+           (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
+            root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
+               return 0;
+
+       /* Don't save inode cache if we are deleting this root */
+       if (btrfs_root_refs(&root->root_item) == 0 &&
+           root != root->fs_info->tree_root)
+               return 0;
+
+       if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+               return 0;
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+
 again:
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 39a9d5750efdaf9401f414cc4e6824ecf2bc301b..751ddf8fc58a152fd442979cb758637cbcf01bc0 100644 (file)
@@ -138,7 +138,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        path->leave_spinning = 1;
-       btrfs_set_trans_block_group(trans, inode);
 
        key.objectid = btrfs_ino(inode);
        key.offset = start;
@@ -426,9 +425,8 @@ again:
                }
        }
        if (start == 0) {
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
-               btrfs_set_trans_block_group(trans, inode);
                trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
                /* lets try to make an inline extent */
@@ -623,8 +621,9 @@ retry:
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);
 
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
+               trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
@@ -793,9 +792,8 @@ static noinline int cow_file_range(struct inode *inode,
        int ret = 0;
 
        BUG_ON(is_free_space_inode(root, inode));
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
-       btrfs_set_trans_block_group(trans, inode);
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -1077,10 +1075,12 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        nolock = is_free_space_inode(root, inode);
 
        if (nolock)
-               trans = btrfs_join_transaction_nolock(root, 1);
+               trans = btrfs_join_transaction_nolock(root);
        else
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
+
        BUG_ON(IS_ERR(trans));
+       trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        cow_start = (u64)-1;
        cur_offset = start;
@@ -1519,8 +1519,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 {
        struct btrfs_ordered_sum *sum;
 
-       btrfs_set_trans_block_group(trans, inode);
-
        list_for_each_entry(sum, list, list) {
                btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
@@ -1735,11 +1733,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
                if (!ret) {
                        if (nolock)
-                               trans = btrfs_join_transaction_nolock(root, 1);
+                               trans = btrfs_join_transaction_nolock(root);
                        else
-                               trans = btrfs_join_transaction(root, 1);
+                               trans = btrfs_join_transaction(root);
                        BUG_ON(IS_ERR(trans));
-                       btrfs_set_trans_block_group(trans, inode);
                        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                        ret = btrfs_update_inode(trans, root, inode);
                        BUG_ON(ret);
@@ -1752,11 +1749,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                         0, &cached_state, GFP_NOFS);
 
        if (nolock)
-               trans = btrfs_join_transaction_nolock(root, 1);
+               trans = btrfs_join_transaction_nolock(root);
        else
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
-       btrfs_set_trans_block_group(trans, inode);
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1990,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        }
 
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
-               return 0;
+               goto good;
 
        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
            test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2431,7 +2427,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                        (u64)-1);
 
        if (root->orphan_block_rsv || root->orphan_item_inserted) {
-               trans = btrfs_join_transaction(root, 1);
+               trans = btrfs_join_transaction(root);
                if (!IS_ERR(trans))
                        btrfs_end_transaction(trans, root);
        }
@@ -2511,12 +2507,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key location;
        int maybe_acls;
-       u64 alloc_group_block;
        u32 rdev;
        int ret;
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
+       path->leave_spinning = 1;
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -2526,6 +2522,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
+       if (!leaf->map_token)
+               map_private_extent_buffer(leaf, (unsigned long)inode_item,
+                                         sizeof(struct btrfs_inode_item),
+                                         &leaf->map_token, &leaf->kaddr,
+                                         &leaf->map_start, &leaf->map_len,
+                                         KM_USER1);
 
        inode->i_mode = btrfs_inode_mode(leaf, inode_item);
        inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
@@ -2555,8 +2557,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
        BTRFS_I(inode)->index_cnt = (u64)-1;
        BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
 
-       alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
-
        /*
         * try to precache a NULL acl entry for files that don't have
         * any xattrs or acls
@@ -2566,8 +2566,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
        if (!maybe_acls)
                cache_no_acl(inode);
 
-       BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
-                                               alloc_group_block, 0);
+       if (leaf->map_token) {
+               unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+               leaf->map_token = NULL;
+       }
+
        btrfs_free_path(path);
        inode_item = NULL;
 
@@ -2647,7 +2650,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_inode_transid(leaf, item, trans->transid);
        btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
        btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
-       btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+       btrfs_set_inode_block_group(leaf, item, 0);
 
        if (leaf->map_token) {
                unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
@@ -3004,8 +3007,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, dir);
-
        btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
 
        ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
@@ -3094,8 +3095,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, dir);
-
        if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                err = btrfs_unlink_subvol(trans, root, dir,
                                          BTRFS_I(inode)->location.objectid,
@@ -3514,7 +3513,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                err = PTR_ERR(trans);
                                break;
                        }
-                       btrfs_set_trans_block_group(trans, inode);
 
                        err = btrfs_drop_extents(trans, inode, cur_offset,
                                                 cur_offset + hole_size,
@@ -3648,9 +3646,8 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_i_size_write(inode, 0);
 
        while (1) {
-               trans = btrfs_start_transaction(root, 0);
+               trans = btrfs_join_transaction(root);
                BUG_ON(IS_ERR(trans));
-               btrfs_set_trans_block_group(trans, inode);
                trans->block_rsv = root->orphan_block_rsv;
 
                ret = btrfs_block_rsv_check(trans, root,
@@ -4133,7 +4130,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
-       path->reada = 2;
+
+       path->reada = 1;
 
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                INIT_LIST_HEAD(&ins_list);
@@ -4268,18 +4266,16 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        if (BTRFS_I(inode)->dummy_inode)
                return 0;
 
-       smp_mb();
-       if (root->fs_info->closing && is_free_space_inode(root, inode))
+       if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
                nolock = true;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                if (nolock)
-                       trans = btrfs_join_transaction_nolock(root, 1);
+                       trans = btrfs_join_transaction_nolock(root);
                else
-                       trans = btrfs_join_transaction(root, 1);
+                       trans = btrfs_join_transaction(root);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);
-               btrfs_set_trans_block_group(trans, inode);
                if (nolock)
                        ret = btrfs_end_transaction_nolock(trans, root);
                else
@@ -4303,9 +4299,8 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
        if (BTRFS_I(inode)->dummy_inode)
                return;
 
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
-       btrfs_set_trans_block_group(trans, inode);
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
@@ -4319,7 +4314,6 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
                                       PTR_ERR(trans));
                        return;
                }
-               btrfs_set_trans_block_group(trans, inode);
 
                ret = btrfs_update_inode(trans, root, inode);
                if (ret) {
@@ -4418,8 +4412,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct inode *dir,
                                     const char *name, int name_len,
-                                    u64 ref_objectid, u64 objectid,
-                                    u64 alloc_hint, int mode, u64 *index)
+                                    u64 ref_objectid, u64 objectid, int mode,
+                                    u64 *index)
 {
        struct inode *inode;
        struct btrfs_inode_item *inode_item;
@@ -4472,8 +4466,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                owner = 0;
        else
                owner = 1;
-       BTRFS_I(inode)->block_group =
-                       btrfs_find_block_group(root, 0, alloc_hint, owner);
 
        key[0].objectid = objectid;
        btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -4629,15 +4621,13 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, dir);
-
        err = btrfs_find_free_ino(root, &objectid);
        if (err)
                goto out_unlock;
 
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
-                               BTRFS_I(dir)->block_group, mode, &index);
+                               mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_unlock;
@@ -4649,7 +4639,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       btrfs_set_trans_block_group(trans, inode);
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
@@ -4658,8 +4647,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
        }
-       btrfs_update_inode_block_group(trans, inode);
-       btrfs_update_inode_block_group(trans, dir);
 out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
@@ -4692,15 +4679,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, dir);
-
        err = btrfs_find_free_ino(root, &objectid);
        if (err)
                goto out_unlock;
 
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
-                               BTRFS_I(dir)->block_group, mode, &index);
+                               mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_unlock;
@@ -4712,7 +4697,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       btrfs_set_trans_block_group(trans, inode);
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
@@ -4723,8 +4707,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
-       btrfs_update_inode_block_group(trans, inode);
-       btrfs_update_inode_block_group(trans, dir);
 out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
@@ -4771,8 +4753,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 
        btrfs_inc_nlink(inode);
        inode->i_ctime = CURRENT_TIME;
-
-       btrfs_set_trans_block_group(trans, dir);
        ihold(inode);
 
        err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -4781,7 +4761,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                drop_inode = 1;
        } else {
                struct dentry *parent = dget_parent(dentry);
-               btrfs_update_inode_block_group(trans, dir);
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
                btrfs_log_new_name(trans, inode, NULL, parent);
@@ -4818,7 +4797,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
-       btrfs_set_trans_block_group(trans, dir);
 
        err = btrfs_find_free_ino(root, &objectid);
        if (err)
@@ -4826,8 +4804,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
-                               BTRFS_I(dir)->block_group, S_IFDIR | mode,
-                               &index);
+                               S_IFDIR | mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_fail;
@@ -4841,7 +4818,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        inode->i_op = &btrfs_dir_inode_operations;
        inode->i_fop = &btrfs_dir_file_operations;
-       btrfs_set_trans_block_group(trans, inode);
 
        btrfs_i_size_write(inode, 0);
        err = btrfs_update_inode(trans, root, inode);
@@ -4855,8 +4831,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        d_instantiate(dentry, inode);
        drop_on_err = 0;
-       btrfs_update_inode_block_group(trans, inode);
-       btrfs_update_inode_block_group(trans, dir);
 
 out_fail:
        nr = trans->blocks_used;
@@ -4989,7 +4963,15 @@ again:
 
        if (!path) {
                path = btrfs_alloc_path();
-               BUG_ON(!path);
+               if (!path) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               /*
+                * Chances are we'll be called again, so go ahead and do
+                * readahead
+                */
+               path->reada = 1;
        }
 
        ret = btrfs_lookup_file_extent(trans, root, path,
@@ -5130,8 +5112,10 @@ again:
                                kunmap(page);
                                free_extent_map(em);
                                em = NULL;
+
                                btrfs_release_path(path);
-                               trans = btrfs_join_transaction(root, 1);
+                               trans = btrfs_join_transaction(root);
+
                                if (IS_ERR(trans))
                                        return ERR_CAST(trans);
                                goto again;
@@ -5375,7 +5359,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
        }
 
-       trans = btrfs_join_transaction(root, 0);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return ERR_CAST(trans);
 
@@ -5611,7 +5595,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                 * to make sure the current transaction stays open
                 * while we look for nocow cross refs
                 */
-               trans = btrfs_join_transaction(root, 0);
+               trans = btrfs_join_transaction(root);
                if (IS_ERR(trans))
                        goto must_cow;
 
@@ -5750,7 +5734,7 @@ again:
 
        BUG_ON(!ordered);
 
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                err = -ENOMEM;
                goto out;
@@ -6500,6 +6484,7 @@ out:
 static int btrfs_truncate(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_block_rsv *rsv;
        int ret;
        int err = 0;
        struct btrfs_trans_handle *trans;
@@ -6513,28 +6498,80 @@ static int btrfs_truncate(struct inode *inode)
        btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
        btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
 
-       trans = btrfs_start_transaction(root, 5);
-       if (IS_ERR(trans))
-               return PTR_ERR(trans);
+       /*
+        * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
+        * 3 things going on here
+        *
+        * 1) We need to reserve space for our orphan item and the space to
+        * delete our orphan item.  Lord knows we don't want to have a dangling
+        * orphan item because we didn't reserve space to remove it.
+        *
+        * 2) We need to reserve space to update our inode.
+        *
+        * 3) We need to have something to cache all the space that is going to
+        * be freed up by the truncate operation, but also have some slack
+        * space reserved in case it uses space during the truncate (thank you
+        * very much snapshotting).
+        *
+        * And we need these to all be separate.  The fact is we can use a lot
+        * of space doing the truncate, and we have no earthly idea how much
+        * space we will use, so we need the truncate reservation to be
+        * separate so it doesn't end up using space reserved for updating the
+        * inode or removing the orphan item.  We also need to be able to stop
+        * the transaction and start a new one, which means we need to be able
+        * to update the inode several times, and we have no way of knowing how
+        * many times that will be, so we can't just reserve 1 item for the
+        * entirety of the operation, so that has to be done separately as
+        * well.  Then there is the orphan item, which does indeed need to be
+        * held on to for the whole operation, and we need nobody to touch this
+        * reserved space except the orphan code.
+        *
+        * So that leaves us with
+        *
+        * 1) root->orphan_block_rsv - for the orphan deletion.
+        * 2) rsv - for the truncate reservation, which we will steal from the
+        * transaction reservation.
+        * 3) fs_info->trans_block_rsv - this will have one item's worth left for
+        * updating the inode.
+        */
+       rsv = btrfs_alloc_block_rsv(root);
+       if (!rsv)
+               return -ENOMEM;
+       btrfs_add_durable_block_rsv(root->fs_info, rsv);
+
+       trans = btrfs_start_transaction(root, 4);
+       if (IS_ERR(trans)) {
+               err = PTR_ERR(trans);
+               goto out;
+       }
 
-       btrfs_set_trans_block_group(trans, inode);
+       /*
+        * Reserve space for the truncate process.  Truncate should be adding
+        * space, but if there are snapshots it may end up using space.
+        */
+       ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
+       BUG_ON(ret);
 
        ret = btrfs_orphan_add(trans, inode);
        if (ret) {
                btrfs_end_transaction(trans, root);
-               return ret;
+               goto out;
        }
 
        nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
 
-       /* Now start a transaction for the truncate */
-       trans = btrfs_start_transaction(root, 0);
-       if (IS_ERR(trans))
-               return PTR_ERR(trans);
-       btrfs_set_trans_block_group(trans, inode);
-       trans->block_rsv = root->orphan_block_rsv;
+       /*
+        * Ok so we've already migrated our bytes over for the truncate, so here
+        * just reserve the one slot we need for updating the inode.
+        */
+       trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans)) {
+               err = PTR_ERR(trans);
+               goto out;
+       }
+       trans->block_rsv = rsv;
 
        /*
         * setattr is responsible for setting the ordered_data_close flag,
@@ -6558,24 +6595,17 @@ static int btrfs_truncate(struct inode *inode)
 
        while (1) {
                if (!trans) {
-                       trans = btrfs_start_transaction(root, 0);
-                       if (IS_ERR(trans))
-                               return PTR_ERR(trans);
-                       btrfs_set_trans_block_group(trans, inode);
-                       trans->block_rsv = root->orphan_block_rsv;
-               }
+                       trans = btrfs_start_transaction(root, 3);
+                       if (IS_ERR(trans)) {
+                               err = PTR_ERR(trans);
+                               goto out;
+                       }
 
-               ret = btrfs_block_rsv_check(trans, root,
-                                           root->orphan_block_rsv, 0, 5);
-               if (ret == -EAGAIN) {
-                       ret = btrfs_commit_transaction(trans, root);
-                       if (ret)
-                               return ret;
-                       trans = NULL;
-                       continue;
-               } else if (ret) {
-                       err = ret;
-                       break;
+                       ret = btrfs_truncate_reserve_metadata(trans, root,
+                                                             rsv);
+                       BUG_ON(ret);
+
+                       trans->block_rsv = rsv;
                }
 
                ret = btrfs_truncate_inode_items(trans, root, inode,
@@ -6586,6 +6616,7 @@ static int btrfs_truncate(struct inode *inode)
                        break;
                }
 
+               trans->block_rsv = &root->fs_info->trans_block_rsv;
                ret = btrfs_update_inode(trans, root, inode);
                if (ret) {
                        err = ret;
@@ -6599,6 +6630,7 @@ static int btrfs_truncate(struct inode *inode)
        }
 
        if (ret == 0 && inode->i_nlink > 0) {
+               trans->block_rsv = root->orphan_block_rsv;
                ret = btrfs_orphan_del(trans, inode);
                if (ret)
                        err = ret;
@@ -6610,15 +6642,20 @@ static int btrfs_truncate(struct inode *inode)
                ret = btrfs_orphan_del(NULL, inode);
        }
 
+       trans->block_rsv = &root->fs_info->trans_block_rsv;
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && !err)
                err = ret;
 
        nr = trans->blocks_used;
        ret = btrfs_end_transaction_throttle(trans, root);
+       btrfs_btree_balance_dirty(root, nr);
+
+out:
+       btrfs_free_block_rsv(root, rsv);
+
        if (ret && !err)
                err = ret;
-       btrfs_btree_balance_dirty(root, nr);
 
        return err;
 }
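
The rewritten btrfs_truncate() above now juggles three independent reservations: root->orphan_block_rsv for the orphan item, the new rsv for the truncate work itself (refilled each time a fresh transaction is started in the loop), and fs_info->trans_block_rsv for the inode update. Below is a minimal userspace sketch of the property the long comment argues for, in plain C with invented names (struct block_rsv and rsv_use() here are toys, not the btrfs API): however much the truncate pool burns, the pools reserved for the inode update and the orphan deletion stay untouched.

/* toy_rsv.c - userspace toy model of keeping separate reservations */
#include <assert.h>
#include <stdio.h>

struct block_rsv {
        const char *name;
        long reserved;          /* bytes set aside for this purpose only */
};

/* consume from one pool only; never fall back to a different pool */
static int rsv_use(struct block_rsv *rsv, long bytes)
{
        if (rsv->reserved < bytes)
                return -1;      /* caller must refill, e.g. once per loop */
        rsv->reserved -= bytes;
        return 0;
}

int main(void)
{
        struct block_rsv orphan = { "orphan",       4096 };
        struct block_rsv trunc  = { "truncate",    16384 };
        struct block_rsv inode  = { "inode-update", 4096 };
        int ok_inode, ok_orphan;

        /* truncate may need an unknown amount of space (snapshots!) ... */
        while (rsv_use(&trunc, 8192) == 0)
                printf("truncate step, %ld left in %s\n",
                       trunc.reserved, trunc.name);

        /* ... but the other pools are untouched, so these still succeed */
        ok_inode = rsv_use(&inode, 4096);
        ok_orphan = rsv_use(&orphan, 4096);
        assert(ok_inode == 0 && ok_orphan == 0);
        printf("inode update and orphan deletion still had their space\n");
        return 0;
}
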
@@ -6627,15 +6664,14 @@ static int btrfs_truncate(struct inode *inode)
  * create a new subvolume directory/inode (helper for the ioctl).
  */
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *new_root,
-                            u64 new_dirid, u64 alloc_hint)
+                            struct btrfs_root *new_root, u64 new_dirid)
 {
        struct inode *inode;
        int err;
        u64 index = 0;
 
        inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
-                               new_dirid, alloc_hint, S_IFDIR | 0700, &index);
+                               new_dirid, S_IFDIR | 0700, &index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
        inode->i_op = &btrfs_dir_inode_operations;
@@ -6748,21 +6784,6 @@ void btrfs_destroy_inode(struct inode *inode)
                spin_unlock(&root->fs_info->ordered_extent_lock);
        }
 
-       if (root == root->fs_info->tree_root) {
-               struct btrfs_block_group_cache *block_group;
-
-               block_group = btrfs_lookup_block_group(root->fs_info,
-                                               BTRFS_I(inode)->block_group);
-               if (block_group && block_group->inode == inode) {
-                       spin_lock(&block_group->lock);
-                       block_group->inode = NULL;
-                       spin_unlock(&block_group->lock);
-                       btrfs_put_block_group(block_group);
-               } else if (block_group) {
-                       btrfs_put_block_group(block_group);
-               }
-       }
-
        spin_lock(&root->orphan_lock);
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
                printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
@@ -6948,8 +6969,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 goto out_notrans;
         }
 
-       btrfs_set_trans_block_group(trans, new_dir);
-
        if (dest != root)
                btrfs_record_root_in_trans(trans, dest);
 
@@ -7131,16 +7150,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, dir);
-
        err = btrfs_find_free_ino(root, &objectid);
        if (err)
                goto out_unlock;
 
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
-                               BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
-                               &index);
+                               S_IFLNK|S_IRWXUGO, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_unlock;
@@ -7152,7 +7168,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       btrfs_set_trans_block_group(trans, inode);
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
@@ -7163,8 +7178,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
-       btrfs_update_inode_block_group(trans, inode);
-       btrfs_update_inode_block_group(trans, dir);
        if (drop_inode)
                goto out_unlock;
 
index 85e818ce00c5df0be84c6ce3c1894ab6c5303615..b793d112d1f65c80b95e06be70233cd5f5aa2654 100644 (file)
@@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
                ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
        }
 
-       trans = btrfs_join_transaction(root, 1);
+       trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
        ret = btrfs_update_inode(trans, root, inode);
@@ -414,8 +414,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        btrfs_record_root_in_trans(trans, new_root);
 
-       ret = btrfs_create_subvol_root(trans, new_root, new_dirid,
-                                      BTRFS_I(dir)->block_group);
+       ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
        /*
         * insert the directory item
         */
@@ -707,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root,
        struct btrfs_file_extent_item *extent;
        int type;
        int ret;
+       u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       min_key.objectid = inode->i_ino;
+       min_key.objectid = ino;
        min_key.type = BTRFS_EXTENT_DATA_KEY;
        min_key.offset = *off;
 
-       max_key.objectid = inode->i_ino;
+       max_key.objectid = ino;
        max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
@@ -727,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root,
                                           path, 0, newer_than);
                if (ret != 0)
                        goto none;
-               if (min_key.objectid != inode->i_ino)
+               if (min_key.objectid != ino)
                        goto none;
                if (min_key.type != BTRFS_EXTENT_DATA_KEY)
                        goto none;
@@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
 
 static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 {
-       struct btrfs_ioctl_fs_info_args fi_args;
+       struct btrfs_ioctl_fs_info_args *fi_args;
        struct btrfs_device *device;
        struct btrfs_device *next;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       int ret = 0;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       fi_args.num_devices = fs_devices->num_devices;
-       fi_args.max_id = 0;
-       memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+       fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
+       if (!fi_args)
+               return -ENOMEM;
+
+       fi_args->num_devices = fs_devices->num_devices;
+       memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
 
        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
-               if (device->devid > fi_args.max_id)
-                       fi_args.max_id = device->devid;
+               if (device->devid > fi_args->max_id)
+                       fi_args->max_id = device->devid;
        }
        mutex_unlock(&fs_devices->device_list_mutex);
 
-       if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
-               return -EFAULT;
+       if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
+               ret = -EFAULT;
 
-       return 0;
+       kfree(fi_args);
+       return ret;
 }
 
 static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
@@ -2489,12 +2494,10 @@ static long btrfs_ioctl_trans_start(struct file *file)
        if (ret)
                goto out;
 
-       mutex_lock(&root->fs_info->trans_mutex);
-       root->fs_info->open_ioctl_trans++;
-       mutex_unlock(&root->fs_info->trans_mutex);
+       atomic_inc(&root->fs_info->open_ioctl_trans);
 
        ret = -ENOMEM;
-       trans = btrfs_start_ioctl_transaction(root, 0);
+       trans = btrfs_start_ioctl_transaction(root);
        if (IS_ERR(trans))
                goto out_drop;
 
@@ -2502,9 +2505,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
        return 0;
 
 out_drop:
-       mutex_lock(&root->fs_info->trans_mutex);
-       root->fs_info->open_ioctl_trans--;
-       mutex_unlock(&root->fs_info->trans_mutex);
+       atomic_dec(&root->fs_info->open_ioctl_trans);
        mnt_drop_write(file->f_path.mnt);
 out:
        return ret;
@@ -2738,9 +2739,7 @@ long btrfs_ioctl_trans_end(struct file *file)
 
        btrfs_end_transaction(trans, root);
 
-       mutex_lock(&root->fs_info->trans_mutex);
-       root->fs_info->open_ioctl_trans--;
-       mutex_unlock(&root->fs_info->trans_mutex);
+       atomic_dec(&root->fs_info->open_ioctl_trans);
 
        mnt_drop_write(file->f_path.mnt);
        return 0;
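
The open_ioctl_trans bookkeeping in the two hunks above is the simplest piece of the trans_mutex removal: a counter that used to be bumped under the mutex becomes an atomic, and the transaction code now reads it with atomic_read(). A small sketch of the same idea using userspace C11 atomics (not the kernel's atomic_t); the function names are invented:

/* counter_toy.c - lock-protected counter replaced by an atomic */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_ioctl_trans;

static void trans_ioctl_start(void)
{
        atomic_fetch_add(&open_ioctl_trans, 1);  /* was: lock; count++; unlock */
}

static void trans_ioctl_end(void)
{
        atomic_fetch_sub(&open_ioctl_trans, 1);  /* was: lock; count--; unlock */
}

int main(void)
{
        trans_ioctl_start();
        printf("open ioctl transactions: %d\n", atomic_load(&open_ioctl_trans));
        trans_ioctl_end();
        return 0;
}
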
index ca38eca70af0b3b552c60cc8615b70925c575635..b1ef27cc673b8abc9e135b05e0da24ff7a0c7bb2 100644 (file)
@@ -677,6 +677,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
                err = -ENOMEM;
                goto out;
        }
+       path1->reada = 1;
+       path2->reada = 2;
 
        node = alloc_backref_node(cache);
        if (!node) {
@@ -1999,6 +2001,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+       path->reada = 1;
 
        reloc_root = root->reloc_root;
        root_item = &reloc_root->root_item;
@@ -2139,10 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err)
        u64 num_bytes = 0;
        int ret;
 
-       mutex_lock(&root->fs_info->trans_mutex);
+       spin_lock(&root->fs_info->trans_lock);
        rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
        rc->merging_rsv_size += rc->nodes_relocated * 2;
-       mutex_unlock(&root->fs_info->trans_mutex);
+       spin_unlock(&root->fs_info->trans_lock);
 again:
        if (!err) {
                num_bytes = rc->merging_rsv_size;
@@ -2152,7 +2155,7 @@ again:
                        err = ret;
        }
 
-       trans = btrfs_join_transaction(rc->extent_root, 1);
+       trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans)) {
                if (!err)
                        btrfs_block_rsv_release(rc->extent_root,
@@ -2211,9 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc)
        int ret;
 again:
        root = rc->extent_root;
-       mutex_lock(&root->fs_info->trans_mutex);
+       spin_lock(&root->fs_info->trans_lock);
        list_splice_init(&rc->reloc_roots, &reloc_roots);
-       mutex_unlock(&root->fs_info->trans_mutex);
+       spin_unlock(&root->fs_info->trans_lock);
 
        while (!list_empty(&reloc_roots)) {
                found = 1;
@@ -3236,7 +3239,7 @@ truncate:
                goto out;
        }
 
-       trans = btrfs_join_transaction(root, 0);
+       trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                ret = PTR_ERR(trans);
@@ -3300,6 +3303,7 @@ static int find_data_references(struct reloc_control *rc,
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+       path->reada = 1;
 
        root = read_fs_root(rc->extent_root->fs_info, ref_root);
        if (IS_ERR(root)) {
@@ -3586,17 +3590,17 @@ next:
 static void set_reloc_control(struct reloc_control *rc)
 {
        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-       mutex_lock(&fs_info->trans_mutex);
+       spin_lock(&fs_info->trans_lock);
        fs_info->reloc_ctl = rc;
-       mutex_unlock(&fs_info->trans_mutex);
+       spin_unlock(&fs_info->trans_lock);
 }
 
 static void unset_reloc_control(struct reloc_control *rc)
 {
        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-       mutex_lock(&fs_info->trans_mutex);
+       spin_lock(&fs_info->trans_lock);
        fs_info->reloc_ctl = NULL;
-       mutex_unlock(&fs_info->trans_mutex);
+       spin_unlock(&fs_info->trans_lock);
 }
 
 static int check_extent_flags(u64 flags)
@@ -3645,7 +3649,7 @@ int prepare_to_relocate(struct reloc_control *rc)
        rc->create_reloc_tree = 1;
        set_reloc_control(rc);
 
-       trans = btrfs_join_transaction(rc->extent_root, 1);
+       trans = btrfs_join_transaction(rc->extent_root);
        BUG_ON(IS_ERR(trans));
        btrfs_commit_transaction(trans, rc->extent_root);
        return 0;
@@ -3668,6 +3672,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+       path->reada = 1;
 
        ret = prepare_to_relocate(rc);
        if (ret) {
@@ -3834,7 +3839,7 @@ restart:
        btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
 
        /* get rid of pinned extents */
-       trans = btrfs_join_transaction(rc->extent_root, 1);
+       trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans))
                err = PTR_ERR(trans);
        else
@@ -4093,6 +4098,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
+       path->reada = -1;
 
        key.objectid = BTRFS_TREE_RELOC_OBJECTID;
        key.type = BTRFS_ROOT_ITEM_KEY;
@@ -4159,7 +4165,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 
        set_reloc_control(rc);
 
-       trans = btrfs_join_transaction(rc->extent_root, 1);
+       trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans)) {
                unset_reloc_control(rc);
                err = PTR_ERR(trans);
@@ -4193,7 +4199,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 
        unset_reloc_control(rc);
 
-       trans = btrfs_join_transaction(rc->extent_root, 1);
+       trans = btrfs_join_transaction(rc->extent_root);
        if (IS_ERR(trans))
                err = PTR_ERR(trans);
        else
index 6dfed0c27ac3b64d337b81b2a6fe374d8fd44afe..a8d03d5efb5df3b3a8d4f6fa659909e571a69a7c 100644 (file)
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
@@ -117,33 +111,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
        }
 }
 
+static void scrub_free_bio(struct bio *bio)
+{
+       int i;
+       struct page *last_page = NULL;
+
+       if (!bio)
+               return;
+
+       for (i = 0; i < bio->bi_vcnt; ++i) {
+               if (bio->bi_io_vec[i].bv_page == last_page)
+                       continue;
+               last_page = bio->bi_io_vec[i].bv_page;
+               __free_page(last_page);
+       }
+       bio_put(bio);
+}
+
 static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 {
        int i;
-       int j;
-       struct page *last_page;
 
        if (!sdev)
                return;
 
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio = sdev->bios[i];
-               struct bio *bio;
 
                if (!sbio)
                        break;
 
-               bio = sbio->bio;
-               if (bio) {
-                       last_page = NULL;
-                       for (j = 0; j < bio->bi_vcnt; ++j) {
-                               if (bio->bi_io_vec[j].bv_page == last_page)
-                                       continue;
-                               last_page = bio->bi_io_vec[j].bv_page;
-                               __free_page(last_page);
-                       }
-                       bio_put(bio);
-               }
+               scrub_free_bio(sbio->bio);
                kfree(sbio);
        }
 
@@ -156,8 +154,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 {
        struct scrub_dev *sdev;
        int             i;
-       int             j;
-       int             ret;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 
        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +161,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
                goto nomem;
        sdev->dev = dev;
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
-               struct bio *bio;
                struct scrub_bio *sbio;
 
                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +168,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
                        goto nomem;
                sdev->bios[i] = sbio;
 
-               bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
-               if (!bio)
-                       goto nomem;
-
                sbio->index = i;
                sbio->sdev = sdev;
-               sbio->bio = bio;
                sbio->count = 0;
                sbio->work.func = scrub_checksum;
-               bio->bi_private = sdev->bios[i];
-               bio->bi_end_io = scrub_bio_end_io;
-               bio->bi_sector = 0;
-               bio->bi_bdev = dev->bdev;
-               bio->bi_size = 0;
-
-               for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
-                       struct page *page;
-                       page = alloc_page(GFP_NOFS);
-                       if (!page)
-                               goto nomem;
-
-                       ret = bio_add_page(bio, page, PAGE_SIZE, 0);
-                       if (!ret)
-                               goto nomem;
-               }
-               WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
 
                if (i != SCRUB_BIOS_PER_DEV-1)
                        sdev->bios[i]->next_free = i + 1;
@@ -369,9 +342,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
        int ret;
        DECLARE_COMPLETION_ONSTACK(complete);
 
-       /* we are going to wait on this IO */
-       rw |= REQ_SYNC;
-
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
@@ -380,6 +350,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
        bio->bi_private = &complete;
        submit_bio(rw, bio);
 
+       /* this will also unplug the queue */
        wait_for_completion(&complete);
 
        ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -394,6 +365,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
 
        sbio->err = err;
+       sbio->bio = bio;
 
        btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
 }
@@ -453,6 +425,8 @@ static void scrub_checksum(struct btrfs_work *work)
        }
 
 out:
+       scrub_free_bio(sbio->bio);
+       sbio->bio = NULL;
        spin_lock(&sdev->list_lock);
        sbio->next_free = sdev->first_free;
        sdev->first_free = sbio->index;
@@ -583,25 +557,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
 static int scrub_submit(struct scrub_dev *sdev)
 {
        struct scrub_bio *sbio;
+       struct bio *bio;
+       int i;
 
        if (sdev->curr == -1)
                return 0;
 
        sbio = sdev->bios[sdev->curr];
 
-       sbio->bio->bi_sector = sbio->physical >> 9;
-       sbio->bio->bi_size = sbio->count * PAGE_SIZE;
-       sbio->bio->bi_next = NULL;
-       sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
-       sbio->bio->bi_comp_cpu = -1;
-       sbio->bio->bi_bdev = sdev->dev->bdev;
+       bio = bio_alloc(GFP_NOFS, sbio->count);
+       if (!bio)
+               goto nomem;
+
+       bio->bi_private = sbio;
+       bio->bi_end_io = scrub_bio_end_io;
+       bio->bi_bdev = sdev->dev->bdev;
+       bio->bi_sector = sbio->physical >> 9;
+
+       for (i = 0; i < sbio->count; ++i) {
+               struct page *page;
+               int ret;
+
+               page = alloc_page(GFP_NOFS);
+               if (!page)
+                       goto nomem;
+
+               ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+               if (!ret) {
+                       __free_page(page);
+                       goto nomem;
+               }
+       }
+
        sbio->err = 0;
        sdev->curr = -1;
        atomic_inc(&sdev->in_flight);
 
-       submit_bio(0, sbio->bio);
+       submit_bio(READ, bio);
 
        return 0;
+
+nomem:
+       scrub_free_bio(bio);
+
+       return -ENOMEM;
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +632,11 @@ again:
                sbio->logical = logical;
        } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
                   sbio->logical + sbio->count * PAGE_SIZE != logical) {
-               scrub_submit(sdev);
+               int ret;
+
+               ret = scrub_submit(sdev);
+               if (ret)
+                       return ret;
                goto again;
        }
        sbio->spag[sbio->count].flags = flags;
@@ -645,8 +648,13 @@ again:
                memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
        }
        ++sbio->count;
-       if (sbio->count == SCRUB_PAGES_PER_BIO || force)
-               scrub_submit(sdev);
+       if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
+               int ret;
+
+               ret = scrub_submit(sdev);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
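
scrub_submit() now allocates the bio and its pages only when a batch is actually sent, and every failure funnels through scrub_free_bio(), which in turn lets scrub_page() propagate the -ENOMEM instead of dropping it. A rough userspace analogue of that build-at-submit, free-whatever-was-built error path; malloc and free stand in for bio_alloc() and alloc_page(), and all toy_* names are invented:

/* submit_toy.c - allocate at submit time, one helper cleans up on failure */
#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_SUBMIT 4

struct toy_bio {
        void *pages[PAGES_PER_SUBMIT];
        int nr;
};

static void toy_free_bio(struct toy_bio *bio)   /* mirrors scrub_free_bio() */
{
        int i;

        if (!bio)
                return;
        for (i = 0; i < bio->nr; i++)
                free(bio->pages[i]);
        free(bio);
}

static int toy_submit(int count)
{
        struct toy_bio *bio = calloc(1, sizeof(*bio));
        int i;

        if (!bio)
                goto nomem;
        for (i = 0; i < count; i++) {
                bio->pages[i] = malloc(4096);
                if (!bio->pages[i])
                        goto nomem;
                bio->nr++;
        }
        printf("submitted %d pages\n", bio->nr);
        toy_free_bio(bio);      /* scrub frees after the checksum worker ran */
        return 0;

nomem:
        toy_free_bio(bio);      /* frees only what was actually built */
        return -1;
}

int main(void)
{
        return toy_submit(PAGES_PER_SUBMIT) ? 1 : 0;
}
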
@@ -727,6 +735,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_root *csum_root = fs_info->csum_root;
        struct btrfs_extent_item *extent;
+       struct blk_plug plug;
        u64 flags;
        int ret;
        int slot;
@@ -789,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
-                       goto out;
-
-               l = path->nodes[0];
-               slot = path->slots[0];
-               btrfs_item_key_to_cpu(l, &key, slot);
-               if (key.objectid != logical) {
-                       ret = btrfs_previous_item(root, path, 0,
-                                                 BTRFS_EXTENT_ITEM_KEY);
-                       if (ret < 0)
-                               goto out;
-               }
+                       goto out_noplug;
 
+               /*
+                * we might miss half an extent here, but that doesn't matter,
+                * as it's only the prefetch
+                */
                while (1) {
                        l = path->nodes[0];
                        slot = path->slots[0];
@@ -809,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
                                if (ret == 0)
                                        continue;
                                if (ret < 0)
-                                       goto out;
+                                       goto out_noplug;
 
                                break;
                        }
@@ -831,6 +834,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
         * the scrub. This might currently (crc32) end up to be about 1MB
         */
        start_stripe = 0;
+       blk_start_plug(&plug);
 again:
        logical = base + offset + start_stripe * increment;
        for (i = start_stripe; i < nstripes; ++i) {
@@ -890,15 +894,20 @@ again:
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;
-
-               l = path->nodes[0];
-               slot = path->slots[0];
-               btrfs_item_key_to_cpu(l, &key, slot);
-               if (key.objectid != logical) {
+               if (ret > 0) {
                        ret = btrfs_previous_item(root, path, 0,
                                                  BTRFS_EXTENT_ITEM_KEY);
                        if (ret < 0)
                                goto out;
+                       if (ret > 0) {
+                               /* there's no smaller item, so stick with the
+                                * larger one */
+                               btrfs_release_path(path);
+                               ret = btrfs_search_slot(NULL, root, &key,
+                                                       path, 0, 0);
+                               if (ret < 0)
+                                       goto out;
+                       }
                }
 
                while (1) {
@@ -972,6 +981,8 @@ next:
        scrub_submit(sdev);
 
 out:
+       blk_finish_plug(&plug);
+out_noplug:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
 }
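
The scrub_stripe() lookup above leans on the btrfs_search_slot() convention: a return of 1 means the exact key was not found and the path points just past where it would be, so the code steps back with btrfs_previous_item() and, when there is nothing smaller, re-searches and sticks with the larger item, as the new comment says. A toy sketch of that convention over a sorted array; the toy_* helpers are invented stand-ins, not btrfs APIs:

/* search_toy.c - "not found" returns 1, caller steps back or keeps larger */
#include <stdio.h>

static const unsigned keys[] = { 10, 20, 30, 40 };
#define NKEYS (sizeof(keys) / sizeof(keys[0]))

/* 0 and *slot on an exact hit, 1 with *slot at the next larger key */
static int toy_search_slot(unsigned key, unsigned *slot)
{
        unsigned i;

        for (i = 0; i < NKEYS; i++) {
                if (keys[i] == key) {
                        *slot = i;
                        return 0;
                }
                if (keys[i] > key) {
                        *slot = i;
                        return 1;
                }
        }
        *slot = NKEYS;          /* past the end */
        return 1;
}

static int toy_previous_item(unsigned *slot)
{
        if (*slot == 0)
                return 1;       /* nothing smaller */
        (*slot)--;
        return 0;
}

int main(void)
{
        unsigned slot;
        int ret = toy_search_slot(25, &slot);

        if (ret > 0 && toy_previous_item(&slot) > 0)
                toy_search_slot(25, &slot);     /* keep the larger item */
        printf("start scanning at key %u\n", keys[slot]);
        return 0;
}
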
@@ -1047,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
-                       goto out;
-               ret = 0;
+                       break;
+               if (ret > 0) {
+                       if (path->slots[0] >=
+                           btrfs_header_nritems(path->nodes[0])) {
+                               ret = btrfs_next_leaf(root, path);
+                               if (ret)
+                                       break;
+                       }
+               }
 
                l = path->nodes[0];
                slot = path->slots[0];
@@ -1058,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
                if (found_key.objectid != sdev->dev->devid)
                        break;
 
-               if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+               if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
                        break;
 
                if (found_key.offset >= end)
@@ -1087,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
                if (!cache) {
                        ret = -ENOENT;
-                       goto out;
+                       break;
                }
                ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
                                  chunk_offset, length);
@@ -1099,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
                btrfs_release_path(path);
        }
 
-out:
        btrfs_free_path(path);
-       return ret;
+
+       /*
+        * ret can still be 1 from search_slot or next_leaf,
+        * that's not an error
+        */
+       return ret < 0 ? ret : 0;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1138,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
        struct btrfs_fs_info *fs_info = root->fs_info;
 
        mutex_lock(&fs_info->scrub_lock);
-       if (fs_info->scrub_workers_refcnt == 0)
+       if (fs_info->scrub_workers_refcnt == 0) {
+               btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+                          fs_info->thread_pool_size, &fs_info->generic_worker);
+               fs_info->scrub_workers.idle_thresh = 4;
                btrfs_start_workers(&fs_info->scrub_workers, 1);
+       }
        ++fs_info->scrub_workers_refcnt;
        mutex_unlock(&fs_info->scrub_lock);
 
@@ -1166,7 +1192,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
        int ret;
        struct btrfs_device *dev;
 
-       if (root->fs_info->closing)
+       if (btrfs_fs_closing(root->fs_info))
                return -EINVAL;
 
        /*
index 9b2e7e5bc3efa40e2779ba450ee99ac7623087d8..0bb4ebbb71b7b0bf6861b7a5efde6f5cf2f5b97e 100644 (file)
@@ -161,7 +161,8 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
+       Opt_inode_cache, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -193,6 +194,7 @@ static match_table_t tokens = {
        {Opt_enospc_debug, "enospc_debug"},
        {Opt_subvolrootid, "subvolrootid=%d"},
        {Opt_defrag, "autodefrag"},
+       {Opt_inode_cache, "inode_cache"},
        {Opt_err, NULL},
 };
 
@@ -361,6 +363,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                        printk(KERN_INFO "btrfs: enabling disk space caching\n");
                        btrfs_set_opt(info->mount_opt, SPACE_CACHE);
                        break;
+               case Opt_inode_cache:
+                       printk(KERN_INFO "btrfs: enabling inode map caching\n");
+                       btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
+                       break;
                case Opt_clear_cache:
                        printk(KERN_INFO "btrfs: force clearing of disk cache\n");
                        btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -819,7 +825,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        } else {
                char b[BDEVNAME_SIZE];
 
-               s->s_flags = flags;
+               s->s_flags = flags | MS_NOSEC;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                error = btrfs_fill_super(s, fs_devices, data,
                                         flags & MS_SILENT ? 1 : 0);
index dc80f7156923ae120c5496f3fd0093a8616fbeec..2b3590b9fe98a6107efc9a7b13ce053e499c049e 100644 (file)
@@ -35,6 +35,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
 {
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
+               BUG_ON(!list_empty(&transaction->list));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
@@ -49,46 +50,72 @@ static noinline void switch_commit_root(struct btrfs_root *root)
 /*
  * either allocate a new transaction or hop into the existing one
  */
-static noinline int join_transaction(struct btrfs_root *root)
+static noinline int join_transaction(struct btrfs_root *root, int nofail)
 {
        struct btrfs_transaction *cur_trans;
+
+       spin_lock(&root->fs_info->trans_lock);
+       if (root->fs_info->trans_no_join) {
+               if (!nofail) {
+                       spin_unlock(&root->fs_info->trans_lock);
+                       return -EBUSY;
+               }
+       }
+
        cur_trans = root->fs_info->running_transaction;
-       if (!cur_trans) {
-               cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
-                                            GFP_NOFS);
-               if (!cur_trans)
-                       return -ENOMEM;
-               root->fs_info->generation++;
-               atomic_set(&cur_trans->num_writers, 1);
-               cur_trans->num_joined = 0;
-               cur_trans->transid = root->fs_info->generation;
-               init_waitqueue_head(&cur_trans->writer_wait);
-               init_waitqueue_head(&cur_trans->commit_wait);
-               cur_trans->in_commit = 0;
-               cur_trans->blocked = 0;
-               atomic_set(&cur_trans->use_count, 1);
-               cur_trans->commit_done = 0;
-               cur_trans->start_time = get_seconds();
-
-               cur_trans->delayed_refs.root = RB_ROOT;
-               cur_trans->delayed_refs.num_entries = 0;
-               cur_trans->delayed_refs.num_heads_ready = 0;
-               cur_trans->delayed_refs.num_heads = 0;
-               cur_trans->delayed_refs.flushing = 0;
-               cur_trans->delayed_refs.run_delayed_start = 0;
-               spin_lock_init(&cur_trans->delayed_refs.lock);
-
-               INIT_LIST_HEAD(&cur_trans->pending_snapshots);
-               list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
-               extent_io_tree_init(&cur_trans->dirty_pages,
-                                    root->fs_info->btree_inode->i_mapping);
-               spin_lock(&root->fs_info->new_trans_lock);
-               root->fs_info->running_transaction = cur_trans;
-               spin_unlock(&root->fs_info->new_trans_lock);
-       } else {
+       if (cur_trans) {
+               atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
+               spin_unlock(&root->fs_info->trans_lock);
+               return 0;
        }
+       spin_unlock(&root->fs_info->trans_lock);
+
+       cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
+       if (!cur_trans)
+               return -ENOMEM;
+       spin_lock(&root->fs_info->trans_lock);
+       if (root->fs_info->running_transaction) {
+               kmem_cache_free(btrfs_transaction_cachep, cur_trans);
+               cur_trans = root->fs_info->running_transaction;
+               atomic_inc(&cur_trans->use_count);
+               atomic_inc(&cur_trans->num_writers);
+               cur_trans->num_joined++;
+               spin_unlock(&root->fs_info->trans_lock);
+               return 0;
+       }
+       atomic_set(&cur_trans->num_writers, 1);
+       cur_trans->num_joined = 0;
+       init_waitqueue_head(&cur_trans->writer_wait);
+       init_waitqueue_head(&cur_trans->commit_wait);
+       cur_trans->in_commit = 0;
+       cur_trans->blocked = 0;
+       /*
+        * One for this trans handle, one so it will live on until we
+        * commit the transaction.
+        */
+       atomic_set(&cur_trans->use_count, 2);
+       cur_trans->commit_done = 0;
+       cur_trans->start_time = get_seconds();
+
+       cur_trans->delayed_refs.root = RB_ROOT;
+       cur_trans->delayed_refs.num_entries = 0;
+       cur_trans->delayed_refs.num_heads_ready = 0;
+       cur_trans->delayed_refs.num_heads = 0;
+       cur_trans->delayed_refs.flushing = 0;
+       cur_trans->delayed_refs.run_delayed_start = 0;
+       spin_lock_init(&cur_trans->commit_lock);
+       spin_lock_init(&cur_trans->delayed_refs.lock);
+
+       INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+       list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+       extent_io_tree_init(&cur_trans->dirty_pages,
+                            root->fs_info->btree_inode->i_mapping);
+       root->fs_info->generation++;
+       cur_trans->transid = root->fs_info->generation;
+       root->fs_info->running_transaction = cur_trans;
+       spin_unlock(&root->fs_info->trans_lock);
 
        return 0;
 }
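
With trans_mutex gone, join_transaction() above takes the new trans_lock spinlock, joins the running transaction if there is one, and otherwise drops the lock, allocates a candidate, retakes the lock and rechecks: if another writer installed a transaction in the meantime, the candidate is freed and the existing one is joined. The sketch below is a hedged userspace rendering of that allocate-outside-the-lock, recheck-under-the-lock pattern; a pthread mutex stands in for the spinlock and the toy_* names are invented, none of this is the btrfs API.

/* join_toy.c - double-checked setup of a shared "current transaction" */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_trans {
        int users;
};

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_trans *running;

static struct toy_trans *join_toy_transaction(void)
{
        struct toy_trans *new;

        pthread_mutex_lock(&trans_lock);
        if (running) {                          /* fast path: hop into it */
                running->users++;
                pthread_mutex_unlock(&trans_lock);
                return running;
        }
        pthread_mutex_unlock(&trans_lock);

        /* allocate with no lock held ... */
        new = calloc(1, sizeof(*new));
        if (!new)
                return NULL;

        pthread_mutex_lock(&trans_lock);
        if (running) {                          /* ... and recheck the race */
                free(new);
                running->users++;
                new = running;
        } else {
                new->users = 1;
                running = new;
        }
        pthread_mutex_unlock(&trans_lock);
        return new;
}

int main(void)
{
        struct toy_trans *a = join_toy_transaction();
        struct toy_trans *b = join_toy_transaction();

        printf("same transaction: %s, users=%d\n",
               a == b ? "yes" : "no", b ? b->users : 0);
        return 0;
}

In the real function a freshly created transaction starts with use_count set to 2 for the reason the new comment gives: one reference for the handle being handed out and one that keeps the transaction alive until it commits.
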
@@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root)
  * to make sure the old root from before we joined the transaction is deleted
  * when the transaction commits
  */
-static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
-                                        struct btrfs_root *root)
+int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root)
 {
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);
 
+               spin_lock(&root->fs_info->fs_roots_radix_lock);
+               if (root->last_trans == trans->transid) {
+                       spin_unlock(&root->fs_info->fs_roots_radix_lock);
+                       return 0;
+               }
+               root->last_trans = trans->transid;
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
-               root->last_trans = trans->transid;
+               spin_unlock(&root->fs_info->fs_roots_radix_lock);
                btrfs_init_reloc_root(trans, root);
        }
        return 0;
 }
 
-int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *root)
-{
-       if (!root->ref_cows)
-               return 0;
-
-       mutex_lock(&root->fs_info->trans_mutex);
-       if (root->last_trans == trans->transid) {
-               mutex_unlock(&root->fs_info->trans_mutex);
-               return 0;
-       }
-
-       record_root_in_trans(trans, root);
-       mutex_unlock(&root->fs_info->trans_mutex);
-       return 0;
-}
-
 /* wait for commit against the current transaction to become unblocked
  * when this is done, it is safe to start a new transaction, but the current
  * transaction might not be fully on disk.
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root)
 {
        struct btrfs_transaction *cur_trans;
 
+       spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                atomic_inc(&cur_trans->use_count);
+               spin_unlock(&root->fs_info->trans_lock);
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!cur_trans->blocked)
                                break;
-                       mutex_unlock(&root->fs_info->trans_mutex);
                        schedule();
-                       mutex_lock(&root->fs_info->trans_mutex);
                }
                finish_wait(&root->fs_info->transaction_wait, &wait);
                put_transaction(cur_trans);
+       } else {
+               spin_unlock(&root->fs_info->trans_lock);
        }
 }
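
The hunk above also shows the locking pattern this series adopts in place of the old trans_mutex: pin the running transaction by bumping its use_count while fs_info->trans_lock is held, drop the spinlock, and only then go to sleep. A condensed sketch of that pin-then-wait idiom, with find_running_transaction() as a hypothetical lookup rather than a function from the patch:

    struct btrfs_transaction *t = NULL;

    spin_lock(&fs_info->trans_lock);
    t = find_running_transaction(fs_info);  /* hypothetical lookup */
    if (t)
            atomic_inc(&t->use_count);      /* pin while the lock is held */
    spin_unlock(&fs_info->trans_lock);

    if (t) {
            wait_for_commit(root, t);       /* may sleep; no spinlock held */
            put_transaction(t);             /* drop the pin */
    }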
 
@@ -167,10 +185,16 @@ enum btrfs_trans_type {
 
 static int may_wait_transaction(struct btrfs_root *root, int type)
 {
-       if (!root->fs_info->log_root_recovering &&
-           ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
-            type == TRANS_USERSPACE))
+       if (root->fs_info->log_root_recovering)
+               return 0;
+
+       if (type == TRANS_USERSPACE)
                return 1;
+
+       if (type == TRANS_START &&
+           !atomic_read(&root->fs_info->open_ioctl_trans))
+               return 1;
+
        return 0;
 }
 
@@ -184,36 +208,44 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);
+
+       if (current->journal_info) {
+               WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
+               h = current->journal_info;
+               h->use_count++;
+               h->orig_rsv = h->block_rsv;
+               h->block_rsv = NULL;
+               goto got_it;
+       }
 again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);
 
-       if (type != TRANS_JOIN_NOLOCK)
-               mutex_lock(&root->fs_info->trans_mutex);
        if (may_wait_transaction(root, type))
                wait_current_trans(root);
 
-       ret = join_transaction(root);
+       do {
+               ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
+               if (ret == -EBUSY)
+                       wait_current_trans(root);
+       } while (ret == -EBUSY);
+
        if (ret < 0) {
                kmem_cache_free(btrfs_trans_handle_cachep, h);
-               if (type != TRANS_JOIN_NOLOCK)
-                       mutex_unlock(&root->fs_info->trans_mutex);
                return ERR_PTR(ret);
        }
 
        cur_trans = root->fs_info->running_transaction;
-       atomic_inc(&cur_trans->use_count);
-       if (type != TRANS_JOIN_NOLOCK)
-               mutex_unlock(&root->fs_info->trans_mutex);
 
        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
-       h->block_group = 0;
        h->bytes_reserved = 0;
        h->delayed_ref_updates = 0;
+       h->use_count = 1;
        h->block_rsv = NULL;
+       h->orig_rsv = NULL;
 
        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -241,11 +273,8 @@ again:
                }
        }
 
-       if (type != TRANS_JOIN_NOLOCK)
-               mutex_lock(&root->fs_info->trans_mutex);
-       record_root_in_trans(h, root);
-       if (type != TRANS_JOIN_NOLOCK)
-               mutex_unlock(&root->fs_info->trans_mutex);
+got_it:
+       btrfs_record_root_in_trans(h, root);
 
        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
@@ -257,22 +286,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 {
        return start_transaction(root, num_items, TRANS_START);
 }
-struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
-                                                  int num_blocks)
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
 {
        return start_transaction(root, 0, TRANS_JOIN);
 }
 
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
-                                                         int num_blocks)
+struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
 {
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
 }
 
-struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
-                                                        int num_blocks)
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
 {
-       return start_transaction(r, 0, TRANS_USERSPACE);
+       return start_transaction(root, 0, TRANS_USERSPACE);
 }
 
 /* wait for a transaction commit to be fully complete */
@@ -280,17 +306,13 @@ static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
 {
        DEFINE_WAIT(wait);
-       mutex_lock(&root->fs_info->trans_mutex);
        while (!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
-               mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
-               mutex_lock(&root->fs_info->trans_mutex);
        }
-       mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
 }
@@ -300,59 +322,56 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret;
 
-       mutex_lock(&root->fs_info->trans_mutex);
-
        ret = 0;
        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
-                       goto out_unlock;
+                       goto out;
 
                /* find specified transaction */
+               spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
+                               atomic_inc(&cur_trans->use_count);
                                break;
                        }
                        if (t->transid > transid)
                                break;
                }
+               spin_unlock(&root->fs_info->trans_lock);
                ret = -EINVAL;
                if (!cur_trans)
-                       goto out_unlock;  /* bad transid */
+                       goto out;  /* bad transid */
        } else {
                /* find newest transaction that is committing | committed */
+               spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
-                                       goto out_unlock;
+                                       break;
                                cur_trans = t;
+                               atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
+               spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
-                       goto out_unlock;  /* nothing committing|committed */
+                       goto out;  /* nothing committing|committed */
        }
 
-       atomic_inc(&cur_trans->use_count);
-       mutex_unlock(&root->fs_info->trans_mutex);
-
        wait_for_commit(root, cur_trans);
 
-       mutex_lock(&root->fs_info->trans_mutex);
        put_transaction(cur_trans);
        ret = 0;
-out_unlock:
-       mutex_unlock(&root->fs_info->trans_mutex);
+out:
        return ret;
 }
 
 void btrfs_throttle(struct btrfs_root *root)
 {
-       mutex_lock(&root->fs_info->trans_mutex);
-       if (!root->fs_info->open_ioctl_trans)
+       if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
-       mutex_unlock(&root->fs_info->trans_mutex);
 }
 
 static int should_end_transaction(struct btrfs_trans_handle *trans,
@@ -370,6 +389,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
 
+       smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;
 
@@ -388,6 +408,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
 
+       if (--trans->use_count) {
+               trans->block_rsv = trans->orig_rsv;
+               return 0;
+       }
+
        while (count < 4) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
@@ -410,9 +435,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_trans_release_metadata(trans, root);
 
-       if (lock && !root->fs_info->open_ioctl_trans &&
-           should_end_transaction(trans, root))
+       if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
+           should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
+               smp_wmb();
+       }
 
        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle)
@@ -703,9 +730,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  */
 int btrfs_add_dead_root(struct btrfs_root *root)
 {
-       mutex_lock(&root->fs_info->trans_mutex);
+       spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
-       mutex_unlock(&root->fs_info->trans_mutex);
+       spin_unlock(&root->fs_info->trans_lock);
        return 0;
 }
 
@@ -721,6 +748,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
        int ret;
        int err = 0;
 
+       spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
@@ -733,6 +761,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
+                       spin_unlock(&fs_info->fs_roots_radix_lock);
 
                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
@@ -753,10 +782,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
+                       spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
+       spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
 }
 
@@ -786,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();
 
-               if (root->fs_info->closing || ret != -EAGAIN)
+               if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
@@ -851,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
-       record_root_in_trans(trans, parent_root);
+       btrfs_record_root_in_trans(trans, parent_root);
 
        /*
         * insert the directory item
@@ -869,7 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);
 
-       record_root_in_trans(trans, root);
+       btrfs_record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);
@@ -967,20 +998,20 @@ static void update_super_roots(struct btrfs_root *root)
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
 {
        int ret = 0;
-       spin_lock(&info->new_trans_lock);
+       spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
-       spin_unlock(&info->new_trans_lock);
+       spin_unlock(&info->trans_lock);
        return ret;
 }
 
 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
 {
        int ret = 0;
-       spin_lock(&info->new_trans_lock);
+       spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
-       spin_unlock(&info->new_trans_lock);
+       spin_unlock(&info->trans_lock);
        return ret;
 }
 
@@ -1004,9 +1035,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
                                    &wait);
                        break;
                }
-               mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
-               mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
        }
 }
@@ -1032,9 +1061,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                    &wait);
                        break;
                }
-               mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
-               mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&root->fs_info->transaction_wait,
                            &wait);
        }
@@ -1072,7 +1099,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 
        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
-       ac->newtrans = btrfs_join_transaction(root, 0);
+       ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
@@ -1080,23 +1107,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        }
 
        /* take transaction reference */
-       mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);
-       mutex_unlock(&root->fs_info->trans_mutex);
 
        btrfs_end_transaction(trans, root);
        schedule_delayed_work(&ac->work, 0);
 
        /* wait for transaction to start and unblock */
-       mutex_lock(&root->fs_info->trans_mutex);
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);
-       put_transaction(cur_trans);
-       mutex_unlock(&root->fs_info->trans_mutex);
 
+       if (current->journal_info == trans)
+               current->journal_info = NULL;
+
+       put_transaction(cur_trans);
        return 0;
 }
 
@@ -1139,38 +1165,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);
 
-       mutex_lock(&root->fs_info->trans_mutex);
+       spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
+               spin_unlock(&cur_trans->commit_lock);
                atomic_inc(&cur_trans->use_count);
-               mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);
 
                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);
 
-               mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
-               mutex_unlock(&root->fs_info->trans_mutex);
 
                return 0;
        }
 
        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
+       spin_unlock(&cur_trans->commit_lock);
        wake_up(&root->fs_info->transaction_blocked_wait);
 
+       spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        atomic_inc(&prev_trans->use_count);
-                       mutex_unlock(&root->fs_info->trans_mutex);
+                       spin_unlock(&root->fs_info->trans_lock);
 
                        wait_for_commit(root, prev_trans);
 
-                       mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
+               } else {
+                       spin_unlock(&root->fs_info->trans_lock);
                }
+       } else {
+               spin_unlock(&root->fs_info->trans_lock);
        }
 
        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
@@ -1178,12 +1207,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        do {
                int snap_pending = 0;
+
                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;
 
                WARN_ON(cur_trans != trans->transaction);
-               mutex_unlock(&root->fs_info->trans_mutex);
 
                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
@@ -1206,14 +1235,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               smp_mb();
                if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);
 
-               mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
+               spin_lock(&root->fs_info->trans_lock);
+               root->fs_info->trans_no_join = 1;
+               spin_unlock(&root->fs_info->trans_lock);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));
 
@@ -1258,9 +1288,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        btrfs_prepare_extent_commit(trans, root);
 
        cur_trans = root->fs_info->running_transaction;
-       spin_lock(&root->fs_info->new_trans_lock);
-       root->fs_info->running_transaction = NULL;
-       spin_unlock(&root->fs_info->new_trans_lock);
 
        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
@@ -1281,10 +1308,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
               sizeof(root->fs_info->super_copy));
 
        trans->transaction->blocked = 0;
+       spin_lock(&root->fs_info->trans_lock);
+       root->fs_info->running_transaction = NULL;
+       root->fs_info->trans_no_join = 0;
+       spin_unlock(&root->fs_info->trans_lock);
 
        wake_up(&root->fs_info->transaction_wait);
 
-       mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root, 0);
@@ -1297,22 +1327,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_finish_extent_commit(trans, root);
 
-       mutex_lock(&root->fs_info->trans_mutex);
-
        cur_trans->commit_done = 1;
 
        root->fs_info->last_trans_committed = cur_trans->transid;
 
        wake_up(&cur_trans->commit_wait);
 
+       spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
+       spin_unlock(&root->fs_info->trans_lock);
+
        put_transaction(cur_trans);
        put_transaction(cur_trans);
 
        trace_btrfs_transaction_commit(root);
 
-       mutex_unlock(&root->fs_info->trans_mutex);
-
        btrfs_scrub_continue(root);
 
        if (current->journal_info == trans)
@@ -1334,9 +1363,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;
 
-       mutex_lock(&fs_info->trans_mutex);
+       spin_lock(&fs_info->trans_lock);
        list_splice_init(&fs_info->dead_roots, &list);
-       mutex_unlock(&fs_info->trans_mutex);
+       spin_unlock(&fs_info->trans_lock);
 
        while (!list_empty(&list)) {
                root = list_entry(list.next, struct btrfs_root, root_list);
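
Taken together, the transaction.c hunks above replace the single trans_mutex with finer-grained locks (fs_info->trans_lock, fs_roots_radix_lock and a per-transaction commit_lock) and make transaction handles re-entrant: a task that already owns a handle gets the same one back through current->journal_info plus a use_count on the handle. A minimal sketch of the nesting behaviour implied by those hunks (the double join is illustrative, not code from the patch):

    struct btrfs_trans_handle *outer, *inner;

    outer = btrfs_join_transaction(root);   /* use_count = 1, journal_info = outer */
    inner = btrfs_join_transaction(root);   /* reuses current->journal_info,
                                               so inner == outer, use_count = 2 */
    /* ... nested work ... */
    btrfs_end_transaction(inner, root);     /* use_count drops to 1, handle stays open */
    btrfs_end_transaction(outer, root);     /* use_count reaches 0, handle really ends */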
index 804c88639e5de00a4065267aaf52d050dd88303c..02564e6230acd672b5fa6539cc88890417204e41 100644 (file)
@@ -28,10 +28,12 @@ struct btrfs_transaction {
         * transaction can end
         */
        atomic_t num_writers;
+       atomic_t use_count;
 
        unsigned long num_joined;
+
+       spinlock_t commit_lock;
        int in_commit;
-       atomic_t use_count;
        int commit_done;
        int blocked;
        struct list_head list;
@@ -45,13 +47,14 @@ struct btrfs_transaction {
 
 struct btrfs_trans_handle {
        u64 transid;
-       u64 block_group;
        u64 bytes_reserved;
+       unsigned long use_count;
        unsigned long blocks_reserved;
        unsigned long blocks_used;
        unsigned long delayed_ref_updates;
        struct btrfs_transaction *transaction;
        struct btrfs_block_rsv *block_rsv;
+       struct btrfs_block_rsv *orig_rsv;
 };
 
 struct btrfs_pending_snapshot {
@@ -66,19 +69,6 @@ struct btrfs_pending_snapshot {
        struct list_head list;
 };
 
-static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
-                                              struct inode *inode)
-{
-       trans->block_group = BTRFS_I(inode)->block_group;
-}
-
-static inline void btrfs_update_inode_block_group(
-                                         struct btrfs_trans_handle *trans,
-                                         struct inode *inode)
-{
-       BTRFS_I(inode)->block_group = trans->block_group;
-}
-
 static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
                                              struct inode *inode)
 {
@@ -92,12 +82,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items);
-struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
-                                                 int num_blocks);
-struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root,
-                                                         int num_blocks);
-struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
-                                                        int num_blocks);
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root);
index c48214ef5c09611100590c75f1eed8c8f0547e04..1efa56e18f9b905ceac4dbcb3faa0221ab331ce2 100644 (file)
@@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                BUG_ON(!new_device);
                memcpy(new_device, device, sizeof(*new_device));
                new_device->name = kstrdup(device->name, GFP_NOFS);
-               BUG_ON(!new_device->name);
+               BUG_ON(device->name && !new_device->name);
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
-       else {
-               /* FIXME, make a readl uuid parser */
-               printk(KERN_INFO "device fsid %llx-%llx ",
-                      *(unsigned long long *)disk_super->fsid,
-                      *(unsigned long long *)(disk_super->fsid + 8));
-       }
+       else
+               printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
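
The fsid hunk above swaps a hand-rolled "%llx-%llx " dump for the kernel's %pU printk extension, which takes a pointer to 16 raw UUID bytes and prints them in the usual 8-4-4-4-12 form. A minimal usage sketch (the local buffer is illustrative):

    u8 fsid[16];    /* 16 raw bytes, e.g. copied out of a superblock */

    printk(KERN_INFO "device fsid %pU\n", fsid);
    /* prints e.g. "device fsid 12345678-9abc-def0-1234-56789abcdef0" */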
index f3107e4b4d56a3d4b31a5f889ff321328e881978..5366fe452ab07db7402402e96351c1b956809e20 100644 (file)
@@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       btrfs_set_trans_block_group(trans, inode);
-
        ret = do_setxattr(trans, inode, name, value, size, flags);
        if (ret)
                goto out;
index 49c9aada0374b4c8b407d1110d37203f26339b62..1a80b048ade822849b88fb51003e5244c80872f5 100644 (file)
@@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                if (!buffer_uptodate(*wait_bh))
                        err = -EIO;
        }
-       if (unlikely(err)) {
+       if (unlikely(err))
                page_zero_new_buffers(page, from, to);
-               ClearPageUptodate(page);
-       }
        return err;
 }
 EXPORT_SYMBOL(__block_write_begin);
index 33da49dc3cc6fa49498df7dd902481f86d8fac9e..5a3953db81184170a8f221fc6c231996993724d2 100644 (file)
@@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
        int err;
        struct inode *inode = page->mapping->host;
        BUG_ON(!inode);
-       igrab(inode);
+       ihold(inode);
        err = writepage_nounlock(page, wbc);
        unlock_page(page);
        iput(inode);
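
This and the following ceph hunks replace igrab() with ihold(). igrab() takes the inode lock and returns NULL when the inode is already being torn down, so every caller needs a NULL check; ihold() is a plain reference bump that is only legal when the caller already holds something that keeps the inode alive, which is the situation in these paths. A condensed before/after sketch, where do_work() is a hypothetical stand-in for the surrounding code:

    /* before: igrab() can fail */
    inode = igrab(&ci->vfs_inode);
    if (inode) {
            do_work(inode);
            iput(inode);
    }

    /* after: valid only because ci already pins the inode */
    inode = &ci->vfs_inode;
    ihold(inode);           /* never returns NULL; warns if no ref was held */
    do_work(inode);
    iput(inode);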
index 1f72b00447c40e6383330496421a177b79150405..f605753c8fe9b20aa4d2f165b57534d1d9b3fe78 100644 (file)
@@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
        while (!list_empty(&mdsc->cap_dirty)) {
                ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
                                      i_dirty_item);
-               inode = igrab(&ci->vfs_inode);
+               inode = &ci->vfs_inode;
+               ihold(inode);
                dout("flush_dirty_caps %p\n", inode);
                spin_unlock(&mdsc->cap_dirty_lock);
-               if (inode) {
-                       ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
-                                       NULL);
-                       iput(inode);
-               }
+               ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
+               iput(inode);
                spin_lock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&mdsc->cap_dirty_lock);
index 33729e822bb96dc0e1197cca57106fb64a9f0abe..ef8f08c343e8936df6f6fd2cde5eb895f93d8f83 100644 (file)
@@ -308,7 +308,8 @@ more:
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
-               req->r_inode = igrab(inode);
+               req->r_inode = inode;
+               ihold(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
@@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
-       if (err)
+       if (err) {
                d_drop(dentry);
-       else if (!req->r_reply_info.head->is_dentry)
-               d_instantiate(dentry, igrab(old_dentry->d_inode));
+       } else if (!req->r_reply_info.head->is_dentry) {
+               ihold(old_dentry->d_inode);
+               d_instantiate(dentry, old_dentry->d_inode);
+       }
        ceph_mdsc_put_request(req);
        return err;
 }
index a610d3d674886a082e5a0e0803f3612534b62f74..f67b687550dea4cd00e27554650ce892845d4d90 100644 (file)
@@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                inode = req->r_target_inode;
                if (inode)
-                       igrab(inode);
+                       ihold(inode);
                ceph_mdsc_put_request(req);
                if (!inode)
                        return ERR_PTR(-ESTALE);
@@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                inode = req->r_target_inode;
                if (inode)
-                       igrab(inode);
+                       ihold(inode);
                ceph_mdsc_put_request(req);
                if (!inode)
                        return ERR_PTR(err ? err : -ESTALE);
index 203252d88d9fa6509d1dd7bfed3e4198c414d906..9542f07d0b9306774e7172afed25b6f809503c06 100644 (file)
@@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file)
                err = PTR_ERR(req);
                goto out;
        }
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        if (!err)
@@ -282,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file)
 static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
-                       int *checkeof, bool align_to_pages,
+                       int *checkeof, bool o_direct,
                        unsigned long buf_align)
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -307,7 +308,7 @@ static int striped_read(struct inode *inode,
        io_align = off & ~PAGE_MASK;
 
 more:
-       if (align_to_pages)
+       if (o_direct)
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
        else
                page_align = pos & ~PAGE_MASK;
@@ -317,10 +318,10 @@ more:
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
-       hit_stripe = this_len < left;
-       was_short = ret >= 0 && ret < this_len;
        if (ret == -ENOENT)
                ret = 0;
+       hit_stripe = this_len < left;
+       was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
@@ -345,20 +346,22 @@ more:
        }
 
        if (was_short) {
-               /* was original extent fully inside i_size? */
-               if (pos + left <= inode->i_size) {
-                       dout("zero tail\n");
-                       ceph_zero_page_vector_range(page_off + read, len - read,
+               /* did we bounce off eof? */
+               if (pos + left > inode->i_size)
+                       *checkeof = 1;
+
+               /* zero trailing bytes (inside i_size) */
+               if (left > 0 && pos < inode->i_size) {
+                       if (pos + left > inode->i_size)
+                               left = inode->i_size - pos;
+
+                       dout("zero tail %d\n", left);
+                       ceph_zero_page_vector_range(page_off + read, left,
                                                    pages);
-                       read = len;
-                       goto out;
+                       read += left;
                }
-
-               /* check i_size */
-               *checkeof = 1;
        }
 
-out:
        if (ret >= 0)
                ret = read;
        dout("striped_read returns %d\n", ret);
@@ -658,7 +661,7 @@ out:
 
                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
-                       dout("aio_read sync_read hit hole, reading more\n");
+                       dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
                        read += ret;
                        base += ret;
                        len -= ret;
index 70b6a4839c386be5fa4b40b48c3bf6521eda7ca7..d8858e96ab1870d62f1597185f87a2c8fb93f34a 100644 (file)
@@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
-                       igrab(in);
+                       ihold(in);
                } else if (ceph_ino(in) == vino.ino &&
                           ceph_snap(in) == vino.snap) {
-                       igrab(in);
+                       ihold(in);
                } else {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, in, ceph_ino(in), ceph_snap(in),
@@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
-               igrab(in);
+               ihold(in);
                rinfo->head->is_dentry = 1;  /* fool notrace handlers */
        }
 
@@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode)
        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                       &ceph_inode(inode)->i_wb_work)) {
                dout("ceph_queue_writeback %p\n", inode);
-               igrab(inode);
+               ihold(inode);
        } else {
                dout("ceph_queue_writeback %p failed\n", inode);
        }
@@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode)
        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                       &ceph_inode(inode)->i_pg_inv_work)) {
                dout("ceph_queue_invalidate %p\n", inode);
-               igrab(inode);
+               ihold(inode);
        } else {
                dout("ceph_queue_invalidate %p failed\n", inode);
        }
@@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
        if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
                       &ci->i_vmtruncate_work)) {
                dout("ceph_queue_vmtruncate %p\n", inode);
-               igrab(inode);
+               ihold(inode);
        } else {
                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                     inode, ci->i_truncate_pending);
@@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
                __mark_inode_dirty(inode, inode_dirty_flags);
 
        if (mask) {
-               req->r_inode = igrab(inode);
+               req->r_inode = inode;
+               ihold(inode);
                req->r_inode_drop = release;
                req->r_args.setattr.mask = cpu_to_le32(mask);
                req->r_num_caps = 1;
@@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask)
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
        req->r_num_caps = 1;
        req->r_args.getattr.mask = cpu_to_le32(mask);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
index 8888c9ba68dbfec194e06f06142547ee2d35c8bc..ef0b5f48e13ac77a75233a40634d000068c9a21c 100644 (file)
@@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
                                       USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
        req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
@@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
 
        if (IS_ERR(req))
                return PTR_ERR(req);
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
 
        req->r_args.setlayout.layout.fl_stripe_unit =
                        cpu_to_le32(l.stripe_unit);
index 476b329867d41cf2cec7b3e2ad51e5f3885d9d7a..80576d05d687639bc72fb089b6803980500dbbd3 100644 (file)
@@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
 
        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
@@ -32,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
                length = fl->fl_end - fl->fl_start + 1;
 
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
-            "length: %llu, wait: %d, type`: %d", (int)lock_type,
+            "length: %llu, wait: %d, type: %d", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type);
 
-
        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
@@ -70,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
-            "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type,
+            "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
@@ -109,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                        dout("mds locked, locking locally");
                        err = posix_lock_file(file, fl, NULL);
                        if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
-                               /* undo! This should only happen if the kernel detects
-                                * local deadlock. */
+                               /* undo! This should only happen if
+                                * the kernel detects local
+                                * deadlock. */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
-                               dout("got %d on posix_lock_file, undid lock", err);
+                               dout("got %d on posix_lock_file, undid lock",
+                                    err);
                        }
                }
 
-       } else {
-               dout("mds returned error code %d", err);
+       } else if (err == -ERESTARTSYS) {
+               dout("undoing lock\n");
+               ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+                                 CEPH_LOCK_UNLOCK, 0, fl);
        }
        return err;
 }
@@ -155,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                                          file, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on flock_lock_file_wait, undid lock", err);
                }
-       } else {
-               dout("mds error code %d", err);
+       } else if (err == -ERESTARTSYS) {
+               dout("undoing lock\n");
+               ceph_lock_message(CEPH_LOCK_FLOCK,
+                                 CEPH_MDS_OP_SETFILELOCK,
+                                 file, CEPH_LOCK_UNLOCK, 0, fl);
        }
        return err;
 }
index 24067d68a5549769df4be2c2a2574c36d6dcf702..54b14de2e729114d50d91fde0f9c5ab5bf7d29d7 100644 (file)
@@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                ci = list_first_entry(&mdsc->snap_flush_list,
                                struct ceph_inode_info, i_snap_flush_item);
                inode = &ci->vfs_inode;
-               igrab(inode);
+               ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
                spin_lock(&inode->i_lock);
                __ceph_flush_snaps(ci, &session, 0);
index f2b628696180e93d69bae9c72464ed4b795d694e..f42d730f1b66ce26afa774d5faeb3752fbc1646e 100644 (file)
@@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
                err = PTR_ERR(req);
                goto out;
        }
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
        req->r_num_caps = 1;
        req->r_args.setxattr.flags = cpu_to_le32(flags);
@@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
                                       USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
-       req->r_inode = igrab(inode);
+       req->r_inode = inode;
+       ihold(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
        req->r_num_caps = 1;
        req->r_path2 = kstrdup(name, GFP_NOFS);
index 1cd4c3a1862d72491432bd4a017782e78b331dcd..53ed1ad2c112808ea27c92dce5fff5ebefad8651 100644 (file)
@@ -7,6 +7,7 @@ config CIFS
        select CRYPTO_MD5
        select CRYPTO_HMAC
        select CRYPTO_ARC4
+       select CRYPTO_ECB
        select CRYPTO_DES
        help
          This is the client VFS module for the Common Internet File System
@@ -148,7 +149,7 @@ config CIFS_FSCACHE
 
 config CIFS_ACL
          bool "Provide CIFS ACL support (EXPERIMENTAL)"
-         depends on EXPERIMENTAL && CIFS_XATTR
+         depends on EXPERIMENTAL && CIFS_XATTR && KEYS
          help
            Allows to fetch CIFS/NTFS ACL from the server.  The DACL blob
            is handed over to the application/caller.
index dd8584d35a14df875e09b4716f041d87f028e70a..545509c3313b0a8e1061742e5360c8d665c2e94c 100644 (file)
@@ -92,7 +92,7 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
                break;
 
        default:
-               cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family);
+               cERROR(1, "Unknown network family '%d'", sa->sa_family);
                key_len = 0;
                break;
        }
@@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
 
        sharename = extract_sharename(tcon->treeName);
        if (IS_ERR(sharename)) {
-               cFYI(1, "CIFS: couldn't extract sharename\n");
+               cFYI(1, "%s: couldn't extract sharename\n", __func__);
                sharename = NULL;
                return 0;
        }
@@ -302,7 +302,7 @@ static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
        pagevec_init(&pvec, 0);
        first = 0;
 
-       cFYI(1, "cifs inode 0x%p now uncached", cifsi);
+       cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi);
 
        for (;;) {
                nr_pages = pagevec_lookup(&pvec,
index dfbd9f1f373daa9b39adeb852595438af0c4177b..5a0ee7f2af062a68fdec065c43099274486c1b2d 100644 (file)
@@ -184,7 +184,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
        if (cifs_pdu == NULL || server == NULL)
                return -EINVAL;
 
-       if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
+       if (!server->session_estab)
                return 0;
 
        if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
index 989442dcfb45d5af094d619d468b648094820908..e9def996e3835610c6f8cc94dfd1a6579f2a4c39 100644 (file)
@@ -352,6 +352,37 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
        }
 }
 
+static void
+cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
+{
+       seq_printf(s, ",sec=");
+
+       switch (server->secType) {
+       case LANMAN:
+               seq_printf(s, "lanman");
+               break;
+       case NTLMv2:
+               seq_printf(s, "ntlmv2");
+               break;
+       case NTLM:
+               seq_printf(s, "ntlm");
+               break;
+       case Kerberos:
+               seq_printf(s, "krb5");
+               break;
+       case RawNTLMSSP:
+               seq_printf(s, "ntlmssp");
+               break;
+       default:
+               /* shouldn't ever happen */
+               seq_printf(s, "unknown");
+               break;
+       }
+
+       if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+               seq_printf(s, "i");
+}
+
 /*
  * cifs_show_options() is for displaying mount options in /proc/mounts.
  * Not all settable options are displayed but most of the important
@@ -365,6 +396,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
        struct sockaddr *srcaddr;
        srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
+       cifs_show_security(s, tcon->ses->server);
+
        seq_printf(s, ",unc=%s", tcon->treeName);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
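
cifs_show_security(), added above, makes the negotiated security flavour visible in mount output: it emits a ",sec=" token named after server->secType and appends an "i" when signing is enabled or required. A hypothetical /proc/mounts line for an NTLMSSP session with signing, purely to show where the token lands (server, path and the remaining options are made up):

    //server/share /mnt/cifs cifs rw,sec=ntlmsspi,unc=\\server\share 0 0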
index 64313f778ebfcd50c456c2b7b92ebcc6c82fb364..0900e1658c967de0fc2f4d70cc7645c9363a5d30 100644 (file)
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.72"
+#define CIFS_VERSION   "1.73"
 #endif                         /* _CIFSFS_H */
index 6d88b82537c3d4cf899a5741c114bb50c61ad564..12cf72dd0c42963b75f674edbb651bb32ccb486c 100644 (file)
@@ -152,7 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mid_entry->callback(mid_entry);
        }
 
-       while (server->tcpStatus == CifsNeedReconnect) {
+       do {
                try_to_freeze();
 
                /* we should try only the port we connected to before */
@@ -167,7 +167,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                                server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
                }
-       }
+       } while (server->tcpStatus == CifsNeedReconnect);
 
        return rc;
 }
@@ -784,7 +784,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                         struct smb_vol *vol)
 {
        char *value, *data, *end;
-       char *mountdata_copy, *options;
+       char *mountdata_copy = NULL, *options;
        unsigned int  temp_len, i, j;
        char separator[2];
        short int override_uid = -1;
@@ -1391,7 +1391,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                "/proc/fs/cifs/LookupCacheEnabled to 0\n");
                } else if (strnicmp(data, "fsc", 3) == 0) {
 #ifndef CONFIG_CIFS_FSCACHE
-                       cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE"
+                       cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
                                  "kernel config option set");
                        goto cifs_parse_mount_err;
 #endif
@@ -1976,7 +1976,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                warned_on_ntlm = true;
                cERROR(1, "default security mechanism requested.  The default "
                        "security mechanism will be upgraded from ntlm to "
-                       "ntlmv2 in kernel release 2.6.41");
+                       "ntlmv2 in kernel release 3.1");
        }
        ses->overrideSecFlg = volume_info->secFlg;
 
@@ -2149,7 +2149,10 @@ cifs_put_tlink(struct tcon_link *tlink)
 }
 
 static inline struct tcon_link *
-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb);
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
+{
+       return cifs_sb->master_tlink;
+}
 
 static int
 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
@@ -3171,6 +3174,10 @@ out:
        return rc;
 }
 
+/*
+ * Issue a TREE_CONNECT request. Note that for IPC$ shares, the tcon
+ * pointer may be NULL.
+ */
 int
 CIFSTCon(unsigned int xid, struct cifs_ses *ses,
         const char *tree, struct cifs_tcon *tcon,
@@ -3205,7 +3212,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        pSMB->AndXCommand = 0xFF;
        pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
        bcc_ptr = &pSMB->Password[0];
-       if ((ses->server->sec_mode) & SECMODE_USER) {
+       if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
                pSMB->PasswordLength = cpu_to_le16(1);  /* minimum */
                *bcc_ptr = 0; /* password is null byte */
                bcc_ptr++;              /* skip password */
@@ -3371,7 +3378,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
        }
        if (rc == 0) {
                spin_lock(&GlobalMid_Lock);
-               if (server->tcpStatus != CifsExiting)
+               if (server->tcpStatus == CifsNeedNegotiate)
                        server->tcpStatus = CifsGood;
                else
                        rc = -EHOSTDOWN;
@@ -3484,12 +3491,6 @@ out:
        return tcon;
 }
 
-static inline struct tcon_link *
-cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
-{
-       return cifs_sb->master_tlink;
-}
-
 struct cifs_tcon *
 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
 {
index d368a47ba5ebf317b377a0c34b0aeb2938a6c871..816696621ec9ea1be2d5b351ac4fc17d0e8f41aa 100644 (file)
@@ -28,14 +28,14 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
                                &cifs_fscache_server_index_def, server);
-       cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server,
-                               server->fscache);
+       cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
+                       server->fscache);
 }
 
 void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
 {
-       cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server,
-                               server->fscache);
+       cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
+                       server->fscache);
        fscache_relinquish_cookie(server->fscache, 0);
        server->fscache = NULL;
 }
@@ -47,13 +47,13 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
        tcon->fscache =
                fscache_acquire_cookie(server->fscache,
                                &cifs_fscache_super_index_def, tcon);
-       cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)",
-                               server->fscache, tcon->fscache);
+       cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache,
+                       tcon->fscache);
 }
 
 void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
 {
-       cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
+       cFYI(1, "%s: (0x%p)", __func__, tcon->fscache);
        fscache_relinquish_cookie(tcon->fscache, 0);
        tcon->fscache = NULL;
 }
@@ -70,8 +70,8 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
                cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
                                &cifs_fscache_inode_object_def, cifsi);
-               cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
-                               cifsi->fscache);
+               cFYI(1, "%s: got FH cookie (0x%p/0x%p)", __func__,
+                               tcon->fscache, cifsi->fscache);
        }
 }
 
@@ -80,8 +80,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
        if (cifsi->fscache) {
-               cFYI(1, "CIFS releasing inode cookie (0x%p)",
-                               cifsi->fscache);
+               cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
                fscache_relinquish_cookie(cifsi->fscache, 0);
                cifsi->fscache = NULL;
        }
@@ -92,8 +91,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
        if (cifsi->fscache) {
-               cFYI(1, "CIFS disabling inode cookie (0x%p)",
-                               cifsi->fscache);
+               cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
                fscache_relinquish_cookie(cifsi->fscache, 1);
                cifsi->fscache = NULL;
        }
@@ -121,8 +119,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
                                        cifs_sb_master_tcon(cifs_sb)->fscache,
                                        &cifs_fscache_inode_object_def,
                                        cifsi);
-               cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p",
-                               cifsi->fscache, old);
+               cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p",
+                               __func__, cifsi->fscache, old);
        }
 }
 
@@ -132,8 +130,8 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
                struct inode *inode = page->mapping->host;
                struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
-               cFYI(1, "CIFS: fscache release page (0x%p/0x%p)",
-                               page, cifsi->fscache);
+               cFYI(1, "%s: (0x%p/0x%p)", __func__, page,
+                               cifsi->fscache);
                if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
                        return 0;
        }
@@ -144,8 +142,7 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
 static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
                                                int error)
 {
-       cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)",
-                       page, error);
+       cFYI(1, "%s: (0x%p/%d)", __func__, page, error);
        if (!error)
                SetPageUptodate(page);
        unlock_page(page);
@@ -158,7 +155,7 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
 {
        int ret;
 
-       cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p",
+       cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__,
                        CIFS_I(inode)->fscache, page, inode);
        ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
                                         cifs_readpage_from_fscache_complete,
@@ -167,11 +164,11 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
        switch (ret) {
 
        case 0: /* page found in fscache, read submitted */
-               cFYI(1, "CIFS: readpage_from_fscache: submitted");
+               cFYI(1, "%s: submitted", __func__);
                return ret;
        case -ENOBUFS:  /* page won't be cached */
        case -ENODATA:  /* page not in cache */
-               cFYI(1, "CIFS: readpage_from_fscache %d", ret);
+               cFYI(1, "%s: %d", __func__, ret);
                return 1;
 
        default:
@@ -190,7 +187,7 @@ int __cifs_readpages_from_fscache(struct inode *inode,
 {
        int ret;
 
-       cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)",
+       cFYI(1, "%s: (0x%p/%u/0x%p)", __func__,
                        CIFS_I(inode)->fscache, *nr_pages, inode);
        ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
                                          pages, nr_pages,
@@ -199,12 +196,12 @@ int __cifs_readpages_from_fscache(struct inode *inode,
                                          mapping_gfp_mask(mapping));
        switch (ret) {
        case 0: /* read submitted to the cache for all pages */
-               cFYI(1, "CIFS: readpages_from_fscache: submitted");
+               cFYI(1, "%s: submitted", __func__);
                return ret;
 
        case -ENOBUFS:  /* some pages are not cached and can't be */
        case -ENODATA:  /* some pages are not cached */
-               cFYI(1, "CIFS: readpages_from_fscache: no page");
+               cFYI(1, "%s: no page", __func__);
                return 1;
 
        default:
@@ -218,7 +215,7 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
 {
        int ret;
 
-       cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p",
+       cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__,
                        CIFS_I(inode)->fscache, page, inode);
        ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL);
        if (ret != 0)
@@ -230,7 +227,7 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct fscache_cookie *cookie = cifsi->fscache;
 
-       cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie);
+       cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie);
        fscache_wait_on_page_write(cookie, page);
        fscache_uncache_page(cookie, page);
 }
index ea5f748906a83218eae60cbb241aaab53cec8ac8..6075a1e727aee13dd3cd492b61d55edd81ee258e 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 
        bprm->mm = NULL;                /* We're using it now */
 
+       set_fs(USER_DS);
        current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
        flush_thread();
        current->personality &= ~bprm->per_clear;
@@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
        if (retval)
                return retval;
 
-       /* kernel module loader fixup */
-       /* so we don't try to load run modprobe in kernel space. */
-       set_fs(USER_DS);
-
        retval = audit_bprm(bprm);
        if (retval)
                return retval;
@@ -1999,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
  * is a special value that we use to trap recursive
  * core dumps
  */
-static int umh_pipe_setup(struct subprocess_info *info)
+static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
 {
        struct file *rp, *wp;
        struct fdtable *fdt;
index 7257752b6d5d3f0c8c133597135cb2f92821f784..7018e1d8902dee6c4b38f7247a38e9315cba27f5 100644 (file)
@@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
                if (attr & ATTR_SYS)
                        inode->i_flags |= S_IMMUTABLE;
                else
-                       inode->i_flags &= S_IMMUTABLE;
+                       inode->i_flags &= ~S_IMMUTABLE;
        }
 
        fat_save_attrs(inode, attr);
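The one-character FAT fix above is worth spelling out: "flags &= S_IMMUTABLE" keeps only that bit and wipes every other flag, while "flags &= ~S_IMMUTABLE" clears just that bit and leaves the rest alone. A small self-contained demonstration with made-up flag values (not the kernel's S_IMMUTABLE):

#include <stdio.h>

#define FLAG_IMMUTABLE  0x010   /* stand-ins for inode->i_flags bits */
#define FLAG_APPEND     0x020

int main(void)
{
        unsigned int flags = FLAG_IMMUTABLE | FLAG_APPEND;
        unsigned int wrong = flags & FLAG_IMMUTABLE;    /* 0x010: APPEND is lost */
        unsigned int right = flags & ~FLAG_IMMUTABLE;   /* 0x020: only IMMUTABLE cleared */

        printf("wrong=0x%03x right=0x%03x\n", wrong, right);
        return 0;
}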
index cc6ec4b2f0ffed9c05959b149614a8a1e19e467d..38f84cd48b67d057798f8f75fe5c8f22f12b10dc 100644 (file)
@@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (sb->s_flags & MS_MANDLOCK)
                goto err;
 
+       sb->s_flags &= ~MS_NOSEC;
+
        if (!parse_fuse_opt((char *) data, &d, is_bdev))
                goto err;
 
index 2792a790e50ba9ae2eae255eccc68e47f39d9501..1c1336e7b3b222d347f657fa2895ddc1c2f05c65 100644 (file)
@@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
                drop_ref = 1;
        }
        spin_lock(&gl->gl_spin);
-       if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+       if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;
+
                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;
-               set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
+
+               if (!delay) {
+                       clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+                       set_bit(GLF_DEMOTE, &gl->gl_flags);
+               }
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
index 3db5ba4568fc8efd30025a9e9906eb01a47f9c45..b3cc8586984e9748ff3c66e6c8703fa84c822c1c 100644 (file)
@@ -974,7 +974,7 @@ out_no_inode:
 out_no_read:
        printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
                __func__, s->s_id, iso_blknum, block);
-       goto out_freesbi;
+       goto out_freebh;
 out_bad_zone_size:
        printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
                sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
 
 out_freebh:
        brelse(bh);
+       brelse(pri_bh);
 out_freesbi:
        kfree(opt.iocharset);
        kfree(sbi);
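The isofs change above redirects the bread-failure path to out_freebh and adds a brelse() for the primary volume descriptor buffer, so each label on the unwind path releases exactly what has been acquired up to that point. A generic, compilable sketch of that stacked-goto cleanup idiom (hypothetical malloc'ed resources stand in for the buffer heads; this is not the isofs code):

#include <stdlib.h>
#include <string.h>

/* A failure after both buffers exist jumps to out_free_b, which releases b
 * and then falls through to out_free_a; picking the label that frees
 * exactly what has been acquired is what the isofs fix above adjusts. */
static int do_work(int fail_late)
{
        char *a, *b;
        int err = -1;

        a = malloc(64);
        if (!a)
                goto out;
        b = malloc(64);
        if (!b)
                goto out_free_a;
        if (fail_late)
                goto out_free_b;

        memset(a, 0, 64);
        memset(b, 0, 64);
        err = 0;                /* success also falls through the cleanup */

out_free_b:
        free(b);
out_free_a:
        free(a);
out:
        return err;
}

int main(void)
{
        return do_work(0);
}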
index 278e3fb40b71e259b3eab7492fcebbd2c2a033e2..583636f745e59dbe7e66e1b3871496ac3b29a63d 100644 (file)
@@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb)
        bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                                 log);
        if (IS_ERR(bdev)) {
-               rc = -PTR_ERR(bdev);
+               rc = PTR_ERR(bdev);
                goto free;
        }
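The jfs hunk drops a stray negation: PTR_ERR() already yields a negative errno value, so "rc = -PTR_ERR(bdev)" flipped the sign and handed callers a positive number where the negative-errno convention was expected. A simplified userspace mimic of the ERR_PTR()/PTR_ERR() convention (the real macros live in include/linux/err.h; these stand-ins are for illustration only):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins: an error pointer encodes a negative errno in the
 * pointer value itself, in the topmost 4095 addresses. */
static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-4095;
}

int main(void)
{
        void *bdev = ERR_PTR(-ENODEV);  /* pretend blkdev_get_by_dev() failed */

        if (IS_ERR(bdev)) {
                long rc = PTR_ERR(bdev);        /* already -ENODEV */
                printf("rc = %ld (negating it would hide the failure)\n", rc);
        }
        return 0;
}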
 
index e2e4e8d032ee47cc899007443482a823caf30535..9e425e7e6c8fbdcb2229fb9516480666c2ac0880 100644 (file)
@@ -812,6 +812,11 @@ static int follow_automount(struct path *path, unsigned flags,
        if (!mnt) /* mount collision */
                return 0;
 
+       if (!*need_mntput) {
+               /* lock_mount() may release path->mnt on error */
+               mntget(path->mnt);
+               *need_mntput = true;
+       }
        err = finish_automount(mnt, path);
 
        switch (err) {
@@ -819,12 +824,9 @@ static int follow_automount(struct path *path, unsigned flags,
                /* Someone else made a mount here whilst we were busy */
                return 0;
        case 0:
-               dput(path->dentry);
-               if (*need_mntput)
-                       mntput(path->mnt);
+               path_put(path);
                path->mnt = mnt;
                path->dentry = dget(mnt->mnt_root);
-               *need_mntput = true;
                return 0;
        default:
                return err;
@@ -844,9 +846,10 @@ static int follow_automount(struct path *path, unsigned flags,
  */
 static int follow_managed(struct path *path, unsigned flags)
 {
+       struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
        unsigned managed;
        bool need_mntput = false;
-       int ret;
+       int ret = 0;
 
        /* Given that we're not holding a lock here, we retain the value in a
         * local variable for each dentry as we look at it so that we don't see
@@ -861,7 +864,7 @@ static int follow_managed(struct path *path, unsigned flags)
                        BUG_ON(!path->dentry->d_op->d_manage);
                        ret = path->dentry->d_op->d_manage(path->dentry, false);
                        if (ret < 0)
-                               return ret == -EISDIR ? 0 : ret;
+                               break;
                }
 
                /* Transit to a mounted filesystem. */
@@ -887,14 +890,19 @@ static int follow_managed(struct path *path, unsigned flags)
                if (managed & DCACHE_NEED_AUTOMOUNT) {
                        ret = follow_automount(path, flags, &need_mntput);
                        if (ret < 0)
-                               return ret == -EISDIR ? 0 : ret;
+                               break;
                        continue;
                }
 
                /* We didn't change the current path point */
                break;
        }
-       return 0;
+
+       if (need_mntput && path->mnt == mnt)
+               mntput(path->mnt);
+       if (ret == -EISDIR)
+               ret = 0;
+       return ret;
 }
 
 int follow_down_one(struct path *path)
@@ -2624,6 +2632,10 @@ static long do_rmdir(int dfd, const char __user *pathname)
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                goto exit2;
+       if (!dentry->d_inode) {
+               error = -ENOENT;
+               goto exit3;
+       }
        error = mnt_want_write(nd.path.mnt);
        if (error)
                goto exit3;
@@ -2712,8 +2724,9 @@ static long do_unlinkat(int dfd, const char __user *pathname)
                if (nd.last.name[nd.last.len])
                        goto slashes;
                inode = dentry->d_inode;
-               if (inode)
-                       ihold(inode);
+               if (!inode)
+                       goto slashes;
+               ihold(inode);
                error = mnt_want_write(nd.path.mnt);
                if (error)
                        goto exit2;
index 7eafe468a29c71cb5338612149a435cf8b037756..b2e3ff34762070a4b37085c051223809b59e4a6c 100644 (file)
@@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree,
        path[level].bp_bh = NULL;
 }
 
+static void nilfs_btree_nop(struct nilfs_bmap *btree,
+                           struct nilfs_btree_path *path,
+                           int level, __u64 *keyp, __u64 *ptrp)
+{
+}
 
 static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                                      struct nilfs_btree_path *path,
@@ -1356,20 +1361,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
        struct buffer_head *bh;
        struct nilfs_btree_node *node, *parent, *sib;
        __u64 sibptr;
-       int pindex, level, ncmin, ncmax, ncblk, ret;
+       int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
 
        ret = 0;
        stats->bs_nblocks = 0;
        ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
        ncblk = nilfs_btree_nchildren_per_block(btree);
 
-       for (level = NILFS_BTREE_LEVEL_NODE_MIN;
+       for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
             level < nilfs_btree_height(btree) - 1;
             level++) {
                node = nilfs_btree_get_nonroot_node(path, level);
                path[level].bp_oldreq.bpr_ptr =
-                       nilfs_btree_node_get_ptr(node, path[level].bp_index,
-                                                ncblk);
+                       nilfs_btree_node_get_ptr(node, dindex, ncblk);
                ret = nilfs_bmap_prepare_end_ptr(btree,
                                                 &path[level].bp_oldreq, dat);
                if (ret < 0)
@@ -1383,6 +1387,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
 
                parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
                pindex = path[level + 1].bp_index;
+               dindex = pindex;
 
                if (pindex > 0) {
                        /* left sibling */
@@ -1421,6 +1426,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                                path[level].bp_sib_bh = bh;
                                path[level].bp_op = nilfs_btree_concat_right;
                                stats->bs_nblocks++;
+                               /*
+                                * When merging the right sibling node
+                                * into the current node, the pointer to
+                                * the right sibling node must be
+                                * terminated instead.  The adjustment
+                                * below is required for that.
+                                */
+                               dindex = pindex + 1;
                                /* continue; */
                        }
                } else {
@@ -1431,29 +1444,31 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
                            NILFS_BTREE_ROOT_NCHILDREN_MAX) {
                                path[level].bp_op = nilfs_btree_shrink;
                                stats->bs_nblocks += 2;
+                               level++;
+                               path[level].bp_op = nilfs_btree_nop;
+                               goto shrink_root_child;
                        } else {
                                path[level].bp_op = nilfs_btree_do_delete;
                                stats->bs_nblocks++;
+                               goto out;
                        }
-
-                       goto out;
-
                }
        }
 
+       /* child of the root node is deleted */
+       path[level].bp_op = nilfs_btree_do_delete;
+       stats->bs_nblocks++;
+
+shrink_root_child:
        node = nilfs_btree_get_root(btree);
        path[level].bp_oldreq.bpr_ptr =
-               nilfs_btree_node_get_ptr(node, path[level].bp_index,
+               nilfs_btree_node_get_ptr(node, dindex,
                                         NILFS_BTREE_ROOT_NCHILDREN_MAX);
 
        ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
        if (ret < 0)
                goto err_out_child_node;
 
-       /* child of the root node is deleted */
-       path[level].bp_op = nilfs_btree_do_delete;
-       stats->bs_nblocks++;
-
        /* success */
  out:
        *levelp = level;
index 141646e88fb5a9e3d5103ed69d3eb4880b33963e..bb24ab6c282fae5099bc9442478c6689e1d44c59 100644 (file)
@@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
 
        if (nilfs->ns_interval)
-               sci->sc_interval = nilfs->ns_interval;
+               sci->sc_interval = HZ * nilfs->ns_interval;
        if (nilfs->ns_watermark)
                sci->sc_watermark = nilfs->ns_watermark;
        return sci;
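The segment-constructor fix above multiplies the user-supplied interval by HZ because sc_interval is consumed as a jiffies value while ns_interval is given in seconds; without the conversion a five-second interval would expire after five timer ticks. A compilable sketch of the conversion, with HZ hard-coded purely for illustration (in the kernel it is the CONFIG_HZ build-time constant):

#include <stdio.h>

#define HZ 100UL        /* illustrative tick rate only */

static unsigned long seconds_to_jiffies(unsigned long secs)
{
        return secs * HZ;
}

int main(void)
{
        unsigned long ns_interval = 5;  /* seconds, e.g. from mount options */
        unsigned long sc_interval = seconds_to_jiffies(ns_interval);

        /* Forgetting the conversion would treat a "5 second" interval as
         * 5 jiffies, i.e. 50 ms at HZ=100. */
        printf("%lu s -> %lu jiffies at HZ=%lu\n", ns_interval, sc_interval, HZ);
        return 0;
}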
index cdbaf5e97308f3564af7820c575c591e3cfcbf95..56f61027236b696fce1ccde3e1edaf86acee59a0 100644 (file)
@@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OCFS2_SUPER_MAGIC;
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+       sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
                ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
        /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
index f82e762eeca277683a49b23e4ce9534d29a672de..d545e97d99c3390706894ff1b030c2491a2c0a38 100644 (file)
@@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct hd_struct *p = dev_to_part(dev);
-       struct gendisk *disk = dev_to_disk(dev);
-       unsigned int alignment = 0;
-
-       if (disk->queue)
-               alignment = queue_limit_discard_alignment(&disk->queue->limits,
-                                                               p->start_sect);
-       return sprintf(buf, "%u\n", alignment);
+       return sprintf(buf, "%u\n", p->discard_alignment);
 }
 
 ssize_t part_stat_show(struct device *dev,
@@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
        p->start_sect = start;
        p->alignment_offset =
                queue_limit_alignment_offset(&disk->queue->limits, start);
+       p->discard_alignment =
+               queue_limit_discard_alignment(&disk->queue->limits, start);
        p->nr_sects = len;
        p->partno = partno;
        p->policy = get_disk_ro(disk);
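The partition hunks cache the discard alignment in struct hd_struct when the partition is added instead of recomputing it from the queue limits on every sysfs read; the exported attribute itself is unchanged. A small sketch of reading it from userspace (the device path below is only an example, substitute a real partition):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/sda1/discard_alignment"; /* example */
        unsigned int alignment;
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%u", &alignment) == 1)
                printf("discard alignment: %u bytes\n", alignment);
        fclose(f);
        return 0;
}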
index 781dec5bd682c30435c852c98d08814f2a511f93..be177f702acbc9bc352c0e13f5b37aeedbf6dd25 100644 (file)
@@ -38,18 +38,21 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
        struct inode *inode;
        struct proc_inode *ei;
        struct dentry *error = ERR_PTR(-ENOENT);
+       void *ns;
 
        inode = proc_pid_make_inode(dir->i_sb, task);
        if (!inode)
                goto out;
 
+       ns = ns_ops->get(task);
+       if (!ns)
+               goto out_iput;
+
        ei = PROC_I(inode);
        inode->i_mode = S_IFREG|S_IRUSR;
        inode->i_fop  = &ns_file_operations;
        ei->ns_ops    = ns_ops;
-       ei->ns        = ns_ops->get(task);
-       if (!ei->ns)
-               goto out_iput;
+       ei->ns        = ns;
 
        dentry->d_op = &pid_dentry_operations;
        d_add(dentry, inode);
index a9000e9cfee54ac804eb71e75d0b72bf989041ed..d6c3b416529b9709f2db526be706dbb415902ea3 100644 (file)
@@ -28,11 +28,12 @@ static int proc_test_super(struct super_block *sb, void *data)
 
 static int proc_set_super(struct super_block *sb, void *data)
 {
-       struct pid_namespace *ns;
-
-       ns = (struct pid_namespace *)data;
-       sb->s_fs_info = get_pid_ns(ns);
-       return set_anon_super(sb, NULL);
+       int err = set_anon_super(sb, NULL);
+       if (!err) {
+               struct pid_namespace *ns = (struct pid_namespace *)data;
+               sb->s_fs_info = get_pid_ns(ns);
+       }
+       return err;
 }
 
 static struct dentry *proc_mount(struct file_system_type *fs_type,
index c75593953c5275eb89b56b08358d25830728cf1e..ab3d672db0deae0a84a01bc1938cd967cc27e79f 100644 (file)
@@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
        } else {
                char b[BDEVNAME_SIZE];
 
-               s->s_flags = flags;
+               s->s_flags = flags | MS_NOSEC;
                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
index 266895783b477a94e64248217483ec52f02c75fb..e34f0d99ea4ec5a7b3361b1fc0cd02fad2d23410 100644 (file)
@@ -95,6 +95,14 @@ static int sysfs_set_super(struct super_block *sb, void *data)
        return error;
 }
 
+static void free_sysfs_super_info(struct sysfs_super_info *info)
+{
+       int type;
+       for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
+               kobj_ns_drop(type, info->ns[type]);
+       kfree(info);
+}
+
 static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
@@ -108,11 +116,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
                return ERR_PTR(-ENOMEM);
 
        for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
-               info->ns[type] = kobj_ns_current(type);
+               info->ns[type] = kobj_ns_grab_current(type);
 
        sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
        if (IS_ERR(sb) || sb->s_fs_info != info)
-               kfree(info);
+               free_sysfs_super_info(info);
        if (IS_ERR(sb))
                return ERR_CAST(sb);
        if (!sb->s_root) {
@@ -131,12 +139,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
 static void sysfs_kill_sb(struct super_block *sb)
 {
        struct sysfs_super_info *info = sysfs_info(sb);
-
        /* Remove the superblock from fs_supers/s_instances
         * so we can't find it, before freeing sysfs_super_info.
         */
        kill_anon_super(sb);
-       kfree(info);
+       free_sysfs_super_info(info);
 }
 
 static struct file_system_type sysfs_fs_type = {
@@ -145,28 +152,6 @@ static struct file_system_type sysfs_fs_type = {
        .kill_sb        = sysfs_kill_sb,
 };
 
-void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
-{
-       struct super_block *sb;
-
-       mutex_lock(&sysfs_mutex);
-       spin_lock(&sb_lock);
-       list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
-               struct sysfs_super_info *info = sysfs_info(sb);
-               /*
-                * If we see a superblock on the fs_supers/s_instances
-                * list the unmount has not completed and sb->s_fs_info
-                * points to a valid struct sysfs_super_info.
-                */
-               /* Ignore superblocks with the wrong ns */
-               if (info->ns[type] != ns)
-                       continue;
-               info->ns[type] = NULL;
-       }
-       spin_unlock(&sb_lock);
-       mutex_unlock(&sysfs_mutex);
-}
-
 int __init sysfs_init(void)
 {
        int err = -ENOMEM;
index 3d28af31d86300ecbddde7b4dc9d7eb25a255dc6..2ed2404f3113be24b1c77fa55e47f5126578e9e9 100644 (file)
@@ -136,7 +136,7 @@ struct sysfs_addrm_cxt {
  * instance).
  */
 struct sysfs_super_info {
-       const void *ns[KOBJ_NS_TYPES];
+       void *ns[KOBJ_NS_TYPES];
 };
 #define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
 extern struct sysfs_dirent sysfs_root;
index f67acbdda5e8c13fce54e51f72fe0882f2fa94da..dffeb3795af1d4204f8554447dbb2d2c33992429 100644 (file)
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
 
 /*
  * Called when the clock was set to cancel the timers in the cancel
- * list.
+ * list. This will wake up processes waiting on these timers. The
+ * wake-up requires ctx->ticks to be non-zero, therefore we increment
+ * it before calling wake_up_locked().
  */
 void timerfd_clock_was_set(void)
 {
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
                spin_lock_irqsave(&ctx->wqh.lock, flags);
                if (ctx->moffs.tv64 != moffs.tv64) {
                        ctx->moffs.tv64 = KTIME_MAX;
+                       ctx->ticks++;
                        wake_up_locked(&ctx->wqh);
                }
                spin_unlock_irqrestore(&ctx->wqh.lock, flags);
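The timerfd hunk bumps ctx->ticks before wake_up_locked() so that a reader blocked in read() really observes the cancellation when CLOCK_REALTIME is set. Seen from userspace, this is the TFD_TIMER_CANCEL_ON_SET behaviour: after a clock change, read() fails with ECANCELED. A hedged usage sketch (the flag is new in this kernel series and may be missing from older libc headers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
        struct itimerspec its;
        struct timespec now;
        uint64_t ticks;
        int fd = timerfd_create(CLOCK_REALTIME, 0);

        if (fd < 0) {
                perror("timerfd_create");
                return 1;
        }

        clock_gettime(CLOCK_REALTIME, &now);
        memset(&its, 0, sizeof(its));
        its.it_value.tv_sec = now.tv_sec + 3600;        /* one hour from now */

        /* Ask to be woken with ECANCELED if CLOCK_REALTIME is set. */
        if (timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET,
                            &its, NULL) < 0) {
                perror("timerfd_settime");
                return 1;
        }

        /* Blocks until the timer fires or someone changes the clock (for
         * example with settimeofday); in the latter case read() fails with
         * ECANCELED, which is the wakeup the hunk above makes reliable. */
        if (read(fd, &ticks, sizeof(ticks)) < 0 && errno == ECANCELED)
                printf("clock was set, timer cancelled\n");

        close(fd);
        return 0;
}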
index 166951e0dcd3c7b1dc232102c74bf8b8d61e8c1e..3be645e012c9358170ad45a87af3b83b8fbd72ca 100644 (file)
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
        ubifs_assert(wbuf->size % c->min_io_size == 0);
        ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
        ubifs_assert(!c->ro_media && !c->ro_mount);
+       ubifs_assert(!c->space_fixup);
        if (c->leb_size - wbuf->offs >= c->max_write_size)
                ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
 
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
        ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
        ubifs_assert(!c->ro_media && !c->ro_mount);
+       ubifs_assert(!c->space_fixup);
 
        if (c->ro_error)
                return -EROFS;
index 34b1679e6e3a671684a1f08be42e155ce4a48ded..cef0460f4c54651bbe5c432d8fc4e3307ee45d07 100644 (file)
@@ -669,6 +669,7 @@ out_free:
 
 out_release:
        release_head(c, BASEHD);
+       kfree(dent);
 out_ro:
        ubifs_ro_mode(c, err);
        if (last_reference)
index bd644bf587a8a7afbef10b83fdef31f4cb71fdf3..a5422fffbd69e10a11b27143feab3cc6a7d7f3f5 100644 (file)
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
                if (IS_ERR(sleb)) {
                        if (PTR_ERR(sleb) == -EUCLEAN)
                                sleb = ubifs_recover_leb(c, lnum, 0,
-                                                        c->sbuf, 0);
+                                                        c->sbuf, -1);
                        if (IS_ERR(sleb)) {
                                err = PTR_ERR(sleb);
                                break;
index 731d9e2e7b50c848bf6088237922c43e0f00ef88..783d8e0beb76b59da67c2509d0371b194f994045 100644 (file)
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 }
 
 /**
- * drop_last_node - drop the last node or group of nodes.
+ * drop_last_group - drop the last group of nodes.
  * @sleb: scanned LEB information
  * @offs: offset of dropped nodes is returned here
- * @grouped: non-zero if whole group of nodes have to be dropped
  *
  * This is a helper function for 'ubifs_recover_leb()' which drops the last
- * node of the scanned LEB or the last group of nodes if @grouped is not zero.
- * This function returns %1 if a node was dropped and %0 otherwise.
+ * group of nodes of the scanned LEB.
  */
-static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
+static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
 {
-       int dropped = 0;
-
        while (!list_empty(&sleb->nodes)) {
                struct ubifs_scan_node *snod;
                struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
                                  list);
                ch = snod->node;
                if (ch->group_type != UBIFS_IN_NODE_GROUP)
-                       return dropped;
-               dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+                       break;
+
+               dbg_rcvry("dropping grouped node at %d:%d",
+                         sleb->lnum, snod->offs);
+               *offs = snod->offs;
+               list_del(&snod->list);
+               kfree(snod);
+               sleb->nodes_cnt -= 1;
+       }
+}
+
+/**
+ * drop_last_node - drop the last node.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ * @grouped: non-zero if whole group of nodes have to be dropped
+ *
+ * This is a helper function for 'ubifs_recover_leb()' which drops the last
+ * node of the scanned LEB.
+ */
+static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
+{
+       struct ubifs_scan_node *snod;
+
+       if (!list_empty(&sleb->nodes)) {
+               snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+                                 list);
+
+               dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
                *offs = snod->offs;
                list_del(&snod->list);
                kfree(snod);
                sleb->nodes_cnt -= 1;
-               dropped = 1;
-               if (!grouped)
-                       break;
        }
-       return dropped;
 }
 
 /**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * @lnum: LEB number
  * @offs: offset
  * @sbuf: LEB-sized buffer to use
- * @grouped: nodes may be grouped for recovery
+ * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
+ *         belong to any journal head)
  *
  * This function does a scan of a LEB, but caters for errors that might have
  * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
  * found, and a negative error code in case of failure.
  */
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-                                        int offs, void *sbuf, int grouped)
+                                        int offs, void *sbuf, int jhead)
 {
        int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
+       int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
        struct ubifs_scan_leb *sleb;
        void *buf = sbuf + offs;
 
-       dbg_rcvry("%d:%d", lnum, offs);
+       dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
 
        sleb = ubifs_start_scan(c, lnum, offs, sbuf);
        if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                 * Scan quietly until there is an error from which we cannot
                 * recover
                 */
-               ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+               ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
                if (ret == SCANNED_A_NODE) {
                        /* A valid node, and not a padding node */
                        struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                 * If nodes are grouped, always drop the incomplete group at
                 * the end.
                 */
-               drop_last_node(sleb, &offs, 1);
+               drop_last_group(sleb, &offs);
 
-       /*
-        * While we are in the middle of the same min. I/O unit keep dropping
-        * nodes. So basically, what we want is to make sure that the last min.
-        * I/O unit where we saw the corruption is dropped completely with all
-        * the uncorrupted node which may possibly sit there.
-        *
-        * In other words, let's name the min. I/O unit where the corruption
-        * starts B, and the previous min. I/O unit A. The below code tries to
-        * deal with a situation when half of B contains valid nodes or the end
-        * of a valid node, and the second half of B contains corrupted data or
-        * garbage. This means that UBIFS had been writing to B just before the
-        * power cut happened. I do not know how realistic is this scenario
-        * that half of the min. I/O unit had been written successfully and the
-        * other half not, but this is possible in our 'failure mode emulation'
-        * infrastructure at least.
-        *
-        * So what is the problem, why we need to drop those nodes? Whey can't
-        * we just clean-up the second half of B by putting a padding node
-        * there? We can, and this works fine with one exception which was
-        * reproduced with power cut emulation testing and happens extremely
-        * rarely. The description follows, but it is worth noting that that is
-        * only about the GC head, so we could do this trick only if the bud
-        * belongs to the GC head, but it does not seem to be worth an
-        * additional "if" statement.
-        *
-        * So, imagine the file-system is full, we run GC which is moving valid
-        * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
-        * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
-        * and will try to continue. Imagine that LEB X is currently the
-        * dirtiest LEB, and the amount of used space in LEB Y is exactly the
-        * same as amount of free space in LEB X.
-        *
-        * And a power cut happens when nodes are moved from LEB X to LEB Y. We
-        * are here trying to recover LEB Y which is the GC head LEB. We find
-        * the min. I/O unit B as described above. Then we clean-up LEB Y by
-        * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
-        * fails, because it cannot find a dirty LEB which could be GC'd into
-        * LEB Y! Even LEB X does not match because the amount of valid nodes
-        * there does not fit the free space in LEB Y any more! And this is
-        * because of the padding node which we added to LEB Y. The
-        * user-visible effect of this which I once observed and analysed is
-        * that we cannot mount the file-system with -ENOSPC error.
-        *
-        * So obviously, to make sure that situation does not happen we should
-        * free min. I/O unit B in LEB Y completely and the last used min. I/O
-        * unit in LEB Y should be A. This is basically what the below code
-        * tries to do.
-        */
-       while (min_io_unit == round_down(offs, c->min_io_size) &&
-              min_io_unit != offs &&
-              drop_last_node(sleb, &offs, grouped));
+       if (jhead == GCHD) {
+               /*
+                * If this LEB belongs to the GC head then while we are in the
+                * middle of the same min. I/O unit keep dropping nodes. So
+                * basically, what we want is to make sure that the last min.
+                * I/O unit where we saw the corruption is dropped completely
+                * with all the uncorrupted nodes which may possibly sit there.
+                *
+                * In other words, let's name the min. I/O unit where the
+                * corruption starts B, and the previous min. I/O unit A. The
+                * below code tries to deal with a situation when half of B
+                * contains valid nodes or the end of a valid node, and the
+                * second half of B contains corrupted data or garbage. This
+                * means that UBIFS had been writing to B just before the power
+                * cut happened. I do not know how realistic this scenario is,
+                * where half of the min. I/O unit has been written successfully
+                * and the other half has not, but it is possible in our 'failure
+                * mode emulation' infrastructure at least.
+                *
+                * So what is the problem, why do we need to drop those nodes?
+                * Why can't we just clean up the second half of B by putting a
+                * padding node there? We can, and this works fine with one
+                * exception which was reproduced with power cut emulation
+                * testing and happens extremely rarely.
+                *
+                * Imagine the file-system is full, we run GC which starts
+                * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
+                * the current GC head LEB). The @c->gc_lnum is -1, which means
+                * that GC will retain LEB X and will try to continue. Imagine
+                * that LEB X is currently the dirtiest LEB, and the amount of
+                * used space in LEB Y is exactly the same as amount of free
+                * space in LEB X.
+                *
+                * And a power cut happens when nodes are moved from LEB X to
+                * LEB Y. We are here trying to recover LEB Y which is the GC
+                * head LEB. We find the min. I/O unit B as described above.
+                * Then we clean-up LEB Y by padding min. I/O unit. And later
+                * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
+                * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
+                * does not match because the amount of valid nodes there does
+                * not fit the free space in LEB Y any more! And this is
+                * because of the padding node which we added to LEB Y. The
+                * user-visible effect of this which I once observed and
+                * analysed is that we cannot mount the file-system with
+                * -ENOSPC error.
+                *
+                * So obviously, to make sure that situation does not happen we
+                * should free min. I/O unit B in LEB Y completely and the last
+                * used min. I/O unit in LEB Y should be A. This is basically
+                * what the below code tries to do.
+                */
+               while (offs > min_io_unit)
+                       drop_last_node(sleb, &offs);
+       }
 
        buf = sbuf + offs;
        len = c->leb_size - offs;
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                }
                ubifs_scan_destroy(sleb);
        }
-       return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+       return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
 }
 
 /**
index 6617280d167938a7bbf8f925c1af0fe0986c8496..5e97161ce4d35f929a953184098057f37454dde2 100644 (file)
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                 * these LEBs could possibly be written to at the power cut
                 * time.
                 */
-               sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
-                                        b->bud->jhead != GCHD);
+               sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
        else
                sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
        if (IS_ERR(sleb))
index ca953a945029ad38ab770845e80b165f8d09a8e2..9e1d05666fed5d1ad03589995f8ecb3f78ec7b82 100644 (file)
@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
        long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
 
        if (nr == 0)
-               return clean_zn_cnt;
+               /*
+                * Due to the way UBIFS updates the clean znode counter it may
+                * temporarily be negative.
+                */
+               return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
 
        if (!clean_zn_cnt) {
                /*
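The shrinker hunk above documents that the clean znode counter can be transiently negative and clamps the value reported back to the VM. A generic sketch of that defensive pattern (not the UBIFS code; the made-up counter and the floor of 1 simply mirror the hunk):

#include <stdio.h>

static long clean_cnt = -3;     /* pretend the statistic briefly went negative */

/* A "how many reclaimable objects do you have" query must never report a
 * negative number, so the raw statistic is clamped before returning. */
static long nr_cached_objects(void)
{
        long n = clean_cnt;

        return n >= 0 ? n : 1;
}

int main(void)
{
        printf("reported: %ld\n", nr_cached_objects());
        return 0;
}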
index 1ab0d22e4c941cd74db7c5486ea05e94b8b7a427..8c892c2d5300f2d6a44e8faf481b14ecb904d9fe 100644 (file)
@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
 
                c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
                c->jheads[i].wbuf.jhead = i;
+               c->jheads[i].grouped = 1;
        }
 
        c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
        /*
         * Garbage Collector head likely contains long-term data and
-        * does not need to be synchronized by timer.
+        * does not need to be synchronized by timer. Also GC head nodes are
+        * not grouped.
         */
        c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
        c->jheads[GCHD].wbuf.no_timer = 1;
+       c->jheads[GCHD].grouped = 0;
 
        return 0;
 }
@@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
        if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
                ubifs_msg("recovery needed");
                c->need_recovery = 1;
-               if (!c->ro_mount) {
-                       err = ubifs_recover_inl_heads(c, c->sbuf);
-                       if (err)
-                               goto out_master;
-               }
-       } else if (!c->ro_mount) {
+       }
+
+       if (c->need_recovery && !c->ro_mount) {
+               err = ubifs_recover_inl_heads(c, c->sbuf);
+               if (err)
+                       goto out_master;
+       }
+
+       err = ubifs_lpt_init(c, 1, !c->ro_mount);
+       if (err)
+               goto out_master;
+
+       if (!c->ro_mount && c->space_fixup) {
+               err = ubifs_fixup_free_space(c);
+               if (err)
+                       goto out_master;
+       }
+
+       if (!c->ro_mount) {
                /*
                 * Set the "dirty" flag so that if we reboot uncleanly we
                 * will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
                c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
                err = ubifs_write_master(c);
                if (err)
-                       goto out_master;
+                       goto out_lpt;
        }
 
-       err = ubifs_lpt_init(c, 1, !c->ro_mount);
-       if (err)
-               goto out_lpt;
-
        err = dbg_check_idx_size(c, c->bi.old_idx_sz);
        if (err)
                goto out_lpt;
@@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
        } else
                ubifs_assert(c->lst.taken_empty_lebs > 0);
 
-       if (!c->ro_mount && c->space_fixup) {
-               err = ubifs_fixup_free_space(c);
-               if (err)
-                       goto out_infos;
-       }
-
        err = dbg_check_filesystem(c);
        if (err)
                goto out_infos;
@@ -1842,7 +1848,6 @@ static void ubifs_put_super(struct super_block *sb)
        bdi_destroy(&c->bdi);
        ubi_close_volume(c->ubi);
        mutex_unlock(&c->umount_mutex);
-       kfree(c);
 }
 
 static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -1965,61 +1970,65 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
        return ERR_PTR(-EINVAL);
 }
 
-static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
+static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
 {
-       struct ubi_volume_desc *ubi = sb->s_fs_info;
        struct ubifs_info *c;
-       struct inode *root;
-       int err;
 
        c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
-       if (!c)
-               return -ENOMEM;
+       if (c) {
+               spin_lock_init(&c->cnt_lock);
+               spin_lock_init(&c->cs_lock);
+               spin_lock_init(&c->buds_lock);
+               spin_lock_init(&c->space_lock);
+               spin_lock_init(&c->orphan_lock);
+               init_rwsem(&c->commit_sem);
+               mutex_init(&c->lp_mutex);
+               mutex_init(&c->tnc_mutex);
+               mutex_init(&c->log_mutex);
+               mutex_init(&c->mst_mutex);
+               mutex_init(&c->umount_mutex);
+               mutex_init(&c->bu_mutex);
+               mutex_init(&c->write_reserve_mutex);
+               init_waitqueue_head(&c->cmt_wq);
+               c->buds = RB_ROOT;
+               c->old_idx = RB_ROOT;
+               c->size_tree = RB_ROOT;
+               c->orph_tree = RB_ROOT;
+               INIT_LIST_HEAD(&c->infos_list);
+               INIT_LIST_HEAD(&c->idx_gc);
+               INIT_LIST_HEAD(&c->replay_list);
+               INIT_LIST_HEAD(&c->replay_buds);
+               INIT_LIST_HEAD(&c->uncat_list);
+               INIT_LIST_HEAD(&c->empty_list);
+               INIT_LIST_HEAD(&c->freeable_list);
+               INIT_LIST_HEAD(&c->frdi_idx_list);
+               INIT_LIST_HEAD(&c->unclean_leb_list);
+               INIT_LIST_HEAD(&c->old_buds);
+               INIT_LIST_HEAD(&c->orph_list);
+               INIT_LIST_HEAD(&c->orph_new);
+               c->no_chk_data_crc = 1;
+
+               c->highest_inum = UBIFS_FIRST_INO;
+               c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
+
+               ubi_get_volume_info(ubi, &c->vi);
+               ubi_get_device_info(c->vi.ubi_num, &c->di);
+       }
+       return c;
+}
 
-       spin_lock_init(&c->cnt_lock);
-       spin_lock_init(&c->cs_lock);
-       spin_lock_init(&c->buds_lock);
-       spin_lock_init(&c->space_lock);
-       spin_lock_init(&c->orphan_lock);
-       init_rwsem(&c->commit_sem);
-       mutex_init(&c->lp_mutex);
-       mutex_init(&c->tnc_mutex);
-       mutex_init(&c->log_mutex);
-       mutex_init(&c->mst_mutex);
-       mutex_init(&c->umount_mutex);
-       mutex_init(&c->bu_mutex);
-       mutex_init(&c->write_reserve_mutex);
-       init_waitqueue_head(&c->cmt_wq);
-       c->buds = RB_ROOT;
-       c->old_idx = RB_ROOT;
-       c->size_tree = RB_ROOT;
-       c->orph_tree = RB_ROOT;
-       INIT_LIST_HEAD(&c->infos_list);
-       INIT_LIST_HEAD(&c->idx_gc);
-       INIT_LIST_HEAD(&c->replay_list);
-       INIT_LIST_HEAD(&c->replay_buds);
-       INIT_LIST_HEAD(&c->uncat_list);
-       INIT_LIST_HEAD(&c->empty_list);
-       INIT_LIST_HEAD(&c->freeable_list);
-       INIT_LIST_HEAD(&c->frdi_idx_list);
-       INIT_LIST_HEAD(&c->unclean_leb_list);
-       INIT_LIST_HEAD(&c->old_buds);
-       INIT_LIST_HEAD(&c->orph_list);
-       INIT_LIST_HEAD(&c->orph_new);
-       c->no_chk_data_crc = 1;
+static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
+{
+       struct ubifs_info *c = sb->s_fs_info;
+       struct inode *root;
+       int err;
 
        c->vfs_sb = sb;
-       c->highest_inum = UBIFS_FIRST_INO;
-       c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
-
-       ubi_get_volume_info(ubi, &c->vi);
-       ubi_get_device_info(c->vi.ubi_num, &c->di);
-
        /* Re-open the UBI device in read-write mode */
        c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
        if (IS_ERR(c->ubi)) {
                err = PTR_ERR(c->ubi);
-               goto out_free;
+               goto out;
        }
 
        /*
@@ -2085,24 +2094,29 @@ out_bdi:
        bdi_destroy(&c->bdi);
 out_close:
        ubi_close_volume(c->ubi);
-out_free:
-       kfree(c);
+out:
        return err;
 }
 
 static int sb_test(struct super_block *sb, void *data)
 {
-       dev_t *dev = data;
+       struct ubifs_info *c1 = data;
        struct ubifs_info *c = sb->s_fs_info;
 
-       return c->vi.cdev == *dev;
+       return c->vi.cdev == c1->vi.cdev;
+}
+
+static int sb_set(struct super_block *sb, void *data)
+{
+       sb->s_fs_info = data;
+       return set_anon_super(sb, NULL);
 }
 
 static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                        const char *name, void *data)
 {
        struct ubi_volume_desc *ubi;
-       struct ubi_volume_info vi;
+       struct ubifs_info *c;
        struct super_block *sb;
        int err;
 
@@ -2119,19 +2133,25 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                        name, (int)PTR_ERR(ubi));
                return ERR_CAST(ubi);
        }
-       ubi_get_volume_info(ubi, &vi);
 
-       dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id);
+       c = alloc_ubifs_info(ubi);
+       if (!c) {
+               err = -ENOMEM;
+               goto out_close;
+       }
+
+       dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
 
-       sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev);
+       sb = sget(fs_type, sb_test, sb_set, c);
        if (IS_ERR(sb)) {
                err = PTR_ERR(sb);
+               kfree(c);
                goto out_close;
        }
 
        if (sb->s_root) {
                struct ubifs_info *c1 = sb->s_fs_info;
-
+               kfree(c);
                /* A new mount point for already mounted UBIFS */
                dbg_gen("this ubi volume is already mounted");
                if (!!(flags & MS_RDONLY) != c1->ro_mount) {
@@ -2140,11 +2159,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                }
        } else {
                sb->s_flags = flags;
-               /*
-                * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
-                * replaced by 'c'.
-                */
-               sb->s_fs_info = ubi;
                err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err)
                        goto out_deact;
@@ -2164,11 +2178,18 @@ out_close:
        return ERR_PTR(err);
 }
 
+static void kill_ubifs_super(struct super_block *s)
+{
+       struct ubifs_info *c = s->s_fs_info;
+       kill_anon_super(s);
+       kfree(c);
+}
+
 static struct file_system_type ubifs_fs_type = {
        .name    = "ubifs",
        .owner   = THIS_MODULE,
        .mount   = ubifs_mount,
-       .kill_sb = kill_anon_super,
+       .kill_sb = kill_ubifs_super,
 };
 
 /*
index 8119b1fd8d94b9be59320e1c31e1d5b41d050dc1..91b4213dde84d11934fc32741b2f902d76a1a84e 100644 (file)
@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
  */
 void ubifs_tnc_close(struct ubifs_info *c)
 {
-       long clean_freed;
-
        tnc_destroy_cnext(c);
        if (c->zroot.znode) {
-               clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
-               atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
+               long n;
+
+               ubifs_destroy_tnc_subtree(c->zroot.znode);
+               n = atomic_long_read(&c->clean_zn_cnt);
+               atomic_long_sub(n, &ubifs_clean_zn_cnt);
        }
        kfree(c->gap_lebs);
        kfree(c->ilebs);
index a70d7b4ffb25e0c37af1cec0e22793b64d33a305..f79983d6f860eb43ec766dab9b6b044f1e23dd7b 100644 (file)
@@ -722,12 +722,14 @@ struct ubifs_bud {
  * struct ubifs_jhead - journal head.
  * @wbuf: head's write-buffer
  * @buds_list: list of bud LEBs belonging to this journal head
+ * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
  *
  * Note, the @buds list is protected by the @c->buds_lock.
  */
 struct ubifs_jhead {
        struct ubifs_wbuf wbuf;
        struct list_head buds_list;
+       unsigned int grouped:1;
 };
 
 /**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
 int ubifs_recover_master_node(struct ubifs_info *c);
 int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
-                                        int offs, void *sbuf, int grouped);
+                                        int offs, void *sbuf, int jhead);
 struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                                             int offs, void *sbuf);
 int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);
index f4213ba1ff853dad53d16d27b6cd713f01784ea7..7f782af286bfa0edd73a125cdcb892339025913a 100644 (file)
@@ -131,19 +131,34 @@ xfs_file_fsync(
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error = 0;
        int                     log_flushed = 0;
 
        trace_xfs_file_fsync(ip);
 
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+       if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
 
        xfs_iflags_clear(ip, XFS_ITRUNCATED);
 
        xfs_ioend_wait(ip);
 
+       if (mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an RT and/or log subvolume we need to make sure
+                * to flush the write cache of the device used for file data
+                * first.  This is to ensure newly written file data makes
+                * it to disk before logging the new inode size in case of
+                * an extending write.
+                */
+               if (XFS_IS_REALTIME_INODE(ip))
+                       xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+               else if (mp->m_logdev_targp != mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(mp->m_ddev_targp);
+       }
+
        /*
         * We always need to make sure that the required inode state is safe on
         * disk.  The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
                 * updates.  The sync transaction will also force the log.
                 */
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
                error = xfs_trans_reserve(tp, 0,
-                               XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+                               XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
                 * force the log.
                 */
                if (xfs_ipincount(ip)) {
-                       error = _xfs_log_force_lsn(ip->i_mount,
+                       error = _xfs_log_force_lsn(mp,
                                        ip->i_itemp->ili_last_lsn,
                                        XFS_LOG_SYNC, &log_flushed);
                }
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
        }
 
-       if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
-               /*
-                * If the log write didn't issue an ordered tag we need
-                * to flush the disk cache for the data device now.
-                */
-               if (!log_flushed)
-                       xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);
-
-               /*
-                * If this inode is on the RT dev we need to flush that
-                * cache as well.
-                */
-               if (XFS_IS_REALTIME_INODE(ip))
-                       xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
-       }
+       /*
+        * If we only have a single device, and the log force above was
+        * a no-op we might have to flush the data device cache here.
+        * This can only happen for fdatasync/O_DSYNC if we were overwriting
+        * an already allocated file and thus do not have any metadata to
+        * commit.
+        */
+       if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
+           mp->m_logdev_targp == mp->m_ddev_targp &&
+           !XFS_IS_REALTIME_INODE(ip) &&
+           !log_flushed)
+               xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
        return -error;
 }
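The xfs_file_fsync() rework above issues the data-device cache flush before forcing the log when a realtime or external log device is in use, and afterwards only when the log force turned out to be a no-op. The userspace contract it protects is plain fdatasync() durability on an overwrite, sketched below (the file name is an example; this is ordinary POSIX code, not anything XFS-specific):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *buf = "payload\n";
        /* Overwrite of an already-allocated file: the size does not change,
         * so there may be no metadata to log, which is exactly the case the
         * comment above describes where only a device cache flush makes the
         * data durable. */
        int fd = open("data.bin", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (pwrite(fd, buf, strlen(buf), 0) < 0 || fdatasync(fd) < 0) {
                perror("pwrite/fdatasync");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}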
index dd21784525a8096ef76f6d6cd14c80a319918839..d44d92cd12b17c7645156b4754c39ea29b5b10e5 100644 (file)
@@ -182,7 +182,7 @@ xfs_vn_mknod(
        if (IS_POSIXACL(dir)) {
                default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
                if (IS_ERR(default_acl))
-                       return -PTR_ERR(default_acl);
+                       return PTR_ERR(default_acl);
 
                if (!default_acl)
                        mode &= ~current_umask();
index 1e3a7ce804dce2feb1956036eec54ac7ed27da36..a1a881e68a9aa86a1aa76c27931e02202a57a08d 100644 (file)
@@ -627,68 +627,6 @@ xfs_blkdev_put(
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
-/*
- * Try to write out the superblock using barriers.
- */
-STATIC int
-xfs_barrier_test(
-       xfs_mount_t     *mp)
-{
-       xfs_buf_t       *sbp = xfs_getsb(mp, 0);
-       int             error;
-
-       XFS_BUF_UNDONE(sbp);
-       XFS_BUF_UNREAD(sbp);
-       XFS_BUF_UNDELAYWRITE(sbp);
-       XFS_BUF_WRITE(sbp);
-       XFS_BUF_UNASYNC(sbp);
-       XFS_BUF_ORDERED(sbp);
-
-       xfsbdstrat(mp, sbp);
-       error = xfs_buf_iowait(sbp);
-
-       /*
-        * Clear all the flags we set and possible error state in the
-        * buffer.  We only did the write to try out whether barriers
-        * worked and shouldn't leave any traces in the superblock
-        * buffer.
-        */
-       XFS_BUF_DONE(sbp);
-       XFS_BUF_ERROR(sbp, 0);
-       XFS_BUF_UNORDERED(sbp);
-
-       xfs_buf_relse(sbp);
-       return error;
-}
-
-STATIC void
-xfs_mountfs_check_barriers(xfs_mount_t *mp)
-{
-       int error;
-
-       if (mp->m_logdev_targp != mp->m_ddev_targp) {
-               xfs_notice(mp,
-                 "Disabling barriers, not supported with external log device");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
-               xfs_notice(mp,
-                       "Disabling barriers, underlying device is readonly");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       error = xfs_barrier_test(mp);
-       if (error) {
-               xfs_notice(mp,
-                       "Disabling barriers, trial barrier write failed");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-}
-
 void
 xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
                switch (token) {
                case Opt_barrier:
                        mp->m_flags |= XFS_MOUNT_BARRIER;
-
-                       /*
-                        * Test if barriers are actually working if we can,
-                        * else delay this check until the filesystem is
-                        * marked writeable.
-                        */
-                       if (!(mp->m_flags & XFS_MOUNT_RDONLY))
-                               xfs_mountfs_check_barriers(mp);
                        break;
                case Opt_nobarrier:
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
                mp->m_flags &= ~XFS_MOUNT_RDONLY;
-               if (mp->m_flags & XFS_MOUNT_BARRIER)
-                       xfs_mountfs_check_barriers(mp);
 
                /*
                 * If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_sb;
 
-       if (mp->m_flags & XFS_MOUNT_BARRIER)
-               xfs_mountfs_check_barriers(mp);
-
        error = xfs_filestream_mount(mp);
        if (error)
                goto out_free_sb;
index 211930246f2073f4759a569936b79ab387003e21..41d5b8f2bf92d3fd3fae9773f667a0bf1cb42381 100644 (file)
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t         *log,
        XFS_BUF_ASYNC(bp);
        bp->b_flags |= XBF_LOG_BUFFER;
 
-       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an external log device, flush the data device
+                * before flushing the log to make sure all meta data
+                * written back from the AIL actually made it to disk
+                * before writing out the new log tail LSN in the log buffer.
+                */
+               if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
                XFS_BUF_ORDERED(bp);
+       }
 
        ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
        ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
index fcdcb5d5c99539871c448411dbdb0fddd2be56b8..d494001b12260a2f56b9337e01226f17dcee3634 100644 (file)
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
 
 extern int __gpio_to_irq(unsigned gpio);
 
-#define GPIOF_DIR_OUT  (0 << 0)
-#define GPIOF_DIR_IN   (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH        (1 << 1)
-
-#define GPIOF_IN               (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
 /**
  * struct gpio - a structure describing a GPIO with configuration
  * @gpio:      the GPIO number
index e9b8e5926befb7b38b03218ffd0f9e6d2d75d454..76bff2bff15e346532be60dc1ce1a13aff070471 100644 (file)
@@ -88,7 +88,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
        pmd_t pmd = *pmdp;
        pmd_clear(mm, address, pmdp);
        return pmd;
-})
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
index ae90e0f63995d39f0bf90e3dd86d222042e3f413..4f76959397fa88a869dff276ba5ab0ccc9494071 100644 (file)
@@ -683,9 +683,11 @@ __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
 __SYSCALL(__NR_syncfs, sys_syncfs)
 #define __NR_setns 268
 __SYSCALL(__NR_setns, sys_setns)
+#define __NR_sendmmsg 269
+__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
 
 #undef __NR_syscalls
-#define __NR_syscalls 269
+#define __NR_syscalls 270
 
 /*
  * All syscalls below here should go away really,
index 9573e0ce312080caf9bcbcb72cd804e2ed200871..33d12f87f0e01735d41a800a73b7ea5ddc9b31a6 100644 (file)
@@ -520,6 +520,8 @@ struct drm_connector {
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
        uint32_t force_encoder_id;
        struct drm_encoder *encoder; /* currently active encoder */
+
+       int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 };
 
 /**
index f04b2a3b0f49b7117cb74163af0997a7a6fd4742..e08f344c6cffc546660506b37e8659679ac78211 100644 (file)
        {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+       {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index 1ae12710d7328a160bef4b793a912995a07c474e..98999cf107ce01fec1fb5dc2559a73f308e31de2 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/gpio.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/spinlock_types.h>
 
 struct bgpio_pdata {
        int base;
index ae9091a684807ffcc95fa6c0fb6de3233383d4cf..1a23722e8878619aec4017cbbd31b951439fdb2b 100644 (file)
@@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
 #define blk_get_integrity(a)                   (0)
 #define blk_integrity_compare(a, b)            (0)
 #define blk_integrity_register(a, b)           (0)
-#define blk_integrity_unregister(a)            do { } while (0);
-#define blk_queue_max_integrity_segments(a, b) do { } while (0);
+#define blk_integrity_unregister(a)            do { } while (0)
+#define blk_queue_max_integrity_segments(a, b) do { } while (0)
 #define queue_max_integrity_segments(a)                (0)
 #define blk_integrity_merge_rq(a, b, c)                (0)
 #define blk_integrity_merge_bio(a, b, c)       (0)
index d4646b48dc4a7b0074dc7491e7f66186f398e70e..18a1baf31f2d531493526a48132dc38a5f2f0f03 100644 (file)
@@ -188,6 +188,7 @@ struct clocksource {
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
+       cycle_t cs_last;
        cycle_t wd_last;
 #endif
 } ____cacheline_aligned;
index c6a850ab2ec5b6f0f8b2fdc75a8dec25510ffc0e..439b173c58822a6438bc73cdd71bf01a3beee64a 100644 (file)
@@ -268,7 +268,7 @@ struct ethtool_pauseparam {
        __u32   cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
 
        /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
-        * being true) the user may set 'autonet' here non-zero to have the
+        * being true) the user may set 'autoneg' here non-zero to have the
         * pause parameters be auto-negotiated too.  In such a case, the
         * {rx,tx}_pause values below determine what capabilities are
         * advertised.
@@ -811,7 +811,7 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
  *     are turned on or off.
  * @set_tx_csum: Deprecated in favour of generic netdev features.  Turn
- *     transmit checksums on or off.  Returns a egative error code or zero.
+ *     transmit checksums on or off.  Returns a negative error code or zero.
  * @get_sg: Deprecated as redundant.  Report whether scatter-gather is
  *     enabled.  
  * @set_sg: Deprecated in favour of generic netdev features.  Turn
@@ -1087,7 +1087,7 @@ struct ethtool_ops {
 /* The following are all involved in forcing a particular link
  * mode for the device for setting things.  When getting the
  * devices settings, these indicate the current mode and whether
- * it was foced up into this mode or autonegotiated.
+ * it was forced up into this mode or autonegotiated.
  */
 
 /* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
index c55d6b7cd5d686fdf4165c1cf1ccffba67e3b207..1c777878f1ea5d375be6f04f3364ac2fb9d6c8ba 100644 (file)
@@ -208,6 +208,7 @@ struct inodes_stat_t {
 #define MS_KERNMOUNT   (1<<22) /* this is a kern_mount call */
 #define MS_I_VERSION   (1<<23) /* Update inode I_version field */
 #define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_NOSEC       (1<<28)
 #define MS_BORN                (1<<29)
 #define MS_ACTIVE      (1<<30)
 #define MS_NOUSER      (1<<31)
@@ -743,9 +744,13 @@ struct inode {
 
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned int            i_flags;
+       unsigned int            i_state;
+#ifdef CONFIG_SECURITY
+       void                    *i_security;
+#endif
        struct mutex            i_mutex;
 
-       unsigned long           i_state;
+
        unsigned long           dirtied_when;   /* jiffies of first dirtying */
 
        struct hlist_node       i_hash;
@@ -797,9 +802,6 @@ struct inode {
        atomic_t                i_readcount; /* struct files open RO */
 #endif
        atomic_t                i_writecount;
-#ifdef CONFIG_SECURITY
-       void                    *i_security;
-#endif
 #ifdef CONFIG_FS_POSIX_ACL
        struct posix_acl        *i_acl;
        struct posix_acl        *i_default_acl;
@@ -2591,7 +2593,7 @@ static inline int is_sxid(mode_t mode)
 
 static inline void inode_has_no_xattr(struct inode *inode)
 {
-       if (!is_sxid(inode->i_mode))
+       if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
                inode->i_flags |= S_NOSEC;
 }
 
index b78956b3c2e749076078cf74e439144c447c9058..300d7582006e49f6ea08a3df09fc9070748c5256 100644 (file)
@@ -100,6 +100,7 @@ struct hd_struct {
        sector_t start_sect;
        sector_t nr_sects;
        sector_t alignment_offset;
+       unsigned int discard_alignment;
        struct device __dev;
        struct kobject *holder_dir;
        int policy, partno;
index 32d47e710661e55879b5473f9850316948f484fe..17b5a0d80e4239cc10fceb670be29dbf4e49a0f0 100644 (file)
@@ -3,6 +3,17 @@
 
 /* see Documentation/gpio.txt */
 
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_DIR_OUT  (0 << 0)
+#define GPIOF_DIR_IN   (1 << 0)
+
+#define GPIOF_INIT_LOW (0 << 1)
+#define GPIOF_INIT_HIGH        (1 << 1)
+
+#define GPIOF_IN               (GPIOF_DIR_IN)
+#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
+#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+
 #ifdef CONFIG_GENERIC_GPIO
 #include <asm/gpio.h>
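With the GPIOF_* request flags moved up here, they stay visible even when CONFIG_GENERIC_GPIO/CONFIG_GPIOLIB are disabled. A minimal consumer sketch (hypothetical driver code; it assumes the gpio_request_one() helper, which takes exactly these flag values):

	#include <linux/gpio.h>

	/* hypothetical probe fragment: claim GPIO 42 as an output driven high */
	static int example_probe(void)
	{
		int err;

		err = gpio_request_one(42, GPIOF_OUT_INIT_HIGH, "example-reset");
		if (err)
			return err;
		/* ... later: gpio_set_value(42, 0); gpio_free(42); */
		return 0;
	}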
 
diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h
new file mode 100644 (file)
index 0000000..624dcec
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * Backlight drivers ADP8870
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_I2C_ADP8870_H
+#define __LINUX_I2C_ADP8870_H
+
+#define ID_ADP8870             8870
+
+#define ADP8870_MAX_BRIGHTNESS 0x7F
+#define FLAG_OFFT_SHIFT 8
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define ADP8870_LED_DIS_BLINK  (0 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1200ms        (2 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1800ms        (3 << FLAG_OFFT_SHIFT)
+
+#define ADP8870_LED_ONT_200ms  0
+#define ADP8870_LED_ONT_600ms  1
+#define ADP8870_LED_ONT_800ms  2
+#define ADP8870_LED_ONT_1200ms 3
+
+#define ADP8870_LED_D7         (7)
+#define ADP8870_LED_D6         (6)
+#define ADP8870_LED_D5         (5)
+#define ADP8870_LED_D4         (4)
+#define ADP8870_LED_D3         (3)
+#define ADP8870_LED_D2         (2)
+#define ADP8870_LED_D1         (1)
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP8870_BL_D7          (1 << 6)
+#define ADP8870_BL_D6          (1 << 5)
+#define ADP8870_BL_D5          (1 << 4)
+#define ADP8870_BL_D4          (1 << 3)
+#define ADP8870_BL_D3          (1 << 2)
+#define ADP8870_BL_D2          (1 << 1)
+#define ADP8870_BL_D1          (1 << 0)
+
+#define ADP8870_FADE_T_DIS     0       /* Fade Timer Disabled */
+#define ADP8870_FADE_T_300ms   1       /* 0.3 Sec */
+#define ADP8870_FADE_T_600ms   2
+#define ADP8870_FADE_T_900ms   3
+#define ADP8870_FADE_T_1200ms  4
+#define ADP8870_FADE_T_1500ms  5
+#define ADP8870_FADE_T_1800ms  6
+#define ADP8870_FADE_T_2100ms  7
+#define ADP8870_FADE_T_2400ms  8
+#define ADP8870_FADE_T_2700ms  9
+#define ADP8870_FADE_T_3000ms  10
+#define ADP8870_FADE_T_3500ms  11
+#define ADP8870_FADE_T_4000ms  12
+#define ADP8870_FADE_T_4500ms  13
+#define ADP8870_FADE_T_5000ms  14
+#define ADP8870_FADE_T_5500ms  15      /* 5.5 Sec */
+
+#define ADP8870_FADE_LAW_LINEAR        0
+#define ADP8870_FADE_LAW_SQUARE        1
+#define ADP8870_FADE_LAW_CUBIC1        2
+#define ADP8870_FADE_LAW_CUBIC2        3
+
+#define ADP8870_BL_AMBL_FILT_80ms      0       /* Light sensor filter time */
+#define ADP8870_BL_AMBL_FILT_160ms     1
+#define ADP8870_BL_AMBL_FILT_320ms     2
+#define ADP8870_BL_AMBL_FILT_640ms     3
+#define ADP8870_BL_AMBL_FILT_1280ms    4
+#define ADP8870_BL_AMBL_FILT_2560ms    5
+#define ADP8870_BL_AMBL_FILT_5120ms    6
+#define ADP8870_BL_AMBL_FILT_10240ms   7       /* 10.24 sec */
+
+/*
+ * Blacklight current 0..30mA
+ */
+#define ADP8870_BL_CUR_mA(I)           ((I * 127) / 30)
+
+/*
+ * L2 comparator current 0..1106uA
+ */
+#define ADP8870_L2_COMP_CURR_uA(I)     ((I * 255) / 1106)
+
+/*
+ * L3 comparator current 0..551uA
+ */
+#define ADP8870_L3_COMP_CURR_uA(I)     ((I * 255) / 551)
+
+/*
+ * L4 comparator current 0..275uA
+ */
+#define ADP8870_L4_COMP_CURR_uA(I)     ((I * 255) / 275)
+
+/*
+ * L5 comparator current 0..138uA
+ */
+#define ADP8870_L5_COMP_CURR_uA(I)     ((I * 255) / 138)
+
+struct adp8870_backlight_platform_data {
+       u8 bl_led_assign;       /* 1 = Backlight 0 = Individual LED */
+       u8 pwm_assign;          /* 1 = Enables PWM mode */
+
+       u8 bl_fade_in;          /* Backlight Fade-In Timer */
+       u8 bl_fade_out;         /* Backlight Fade-Out Timer */
+       u8 bl_fade_law;         /* fade-on/fade-off transfer characteristic */
+
+       u8 en_ambl_sens;        /* 1 = enable ambient light sensor */
+       u8 abml_filt;           /* Light sensor filter time */
+
+       u8 l1_daylight_max;     /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l1_daylight_dim;     /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l2_bright_max;       /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l2_bright_dim;       /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l3_office_max;       /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l3_office_dim;       /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l4_indoor_max;       /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l4_indor_dim;        /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l5_dark_max;         /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+       u8 l5_dark_dim;         /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+
+       u8 l2_trip;             /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+       u8 l2_hyst;             /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+       u8 l3_trip;             /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+       u8 l3_hyst;             /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+       u8 l4_trip;             /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+       u8 l4_hyst;             /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+       u8 l5_trip;             /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+       u8 l5_hyst;             /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+
+       /**
+        * Independent Current Sinks / LEDS
+        * Sinks not assigned to the Backlight can be exposed to
+        * user space using the LEDS CLASS interface
+        */
+
+       int num_leds;
+       struct led_info *leds;
+       u8 led_fade_in;         /* LED Fade-In Timer */
+       u8 led_fade_out;        /* LED Fade-Out Timer */
+       u8 led_fade_law;        /* fade-on/fade-off transfer characteristic */
+       u8 led_on_time;
+};
+
+#endif /* __LINUX_I2C_ADP8870_H */
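The *_CUR_mA()/*_COMP_CURR_uA() macros above scale physical values onto the register code ranges (0..127 for backlight current, 0..255 for the comparators) with plain integer arithmetic; for example ADP8870_BL_CUR_mA(20) evaluates to (20 * 127) / 30 = 84. A hypothetical board-file fragment using the new header (field values are illustrative only):

	#include <linux/i2c/adp8870.h>

	/* illustrative platform data: D1/D2 drive the backlight, 20 mA max in daylight */
	static struct adp8870_backlight_platform_data example_adp8870_pdata = {
		.bl_led_assign   = ADP8870_BL_D1 | ADP8870_BL_D2,
		.bl_fade_in      = ADP8870_FADE_T_600ms,
		.bl_fade_out     = ADP8870_FADE_T_600ms,
		.bl_fade_law     = ADP8870_FADE_LAW_LINEAR,
		.l1_daylight_max = ADP8870_BL_CUR_mA(20),	/* (20 * 127) / 30 == 84 */
		.l1_daylight_dim = 0,
	};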
index b2eee5879883b3fbaf4d00ce28330320c03a9213..bf56b6f78270e6a60a63e4870295006a07ca23d8 100644 (file)
@@ -1003,8 +1003,12 @@ struct ieee80211_ht_info {
 #define WLAN_CAPABILITY_ESS            (1<<0)
 #define WLAN_CAPABILITY_IBSS           (1<<1)
 
-/* A mesh STA sets the ESS and IBSS capability bits to zero */
-#define WLAN_CAPABILITY_IS_MBSS(cap)   \
+/*
+ * A mesh STA sets the ESS and IBSS capability bits to zero.
+ * however, this holds true for p2p probe responses (in the p2p_find
+ * phase) as well.
+ */
+#define WLAN_CAPABILITY_IS_STA_BSS(cap)        \
        (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)))
 
 #define WLAN_CAPABILITY_CF_POLLABLE    (1<<2)
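As the new comment notes, a zero ESS/IBSS pair no longer implies a mesh BSS, hence the rename. A minimal usage sketch (cap is assumed to be the host-endian capability field of a scanned beacon or probe response):

	#include <linux/ieee80211.h>

	/* illustrative only: true for an ordinary infrastructure or ad-hoc BSS,
	 * false for the mesh-STA / P2P-probe-response case */
	static bool example_is_regular_bss(u16 cap)
	{
		return !WLAN_CAPABILITY_IS_STA_BSS(cap);
	}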
index 72bfa5a034dd430a1a79aa2ef1cb24a1b79836df..7b318630139f3e50f55e82eefcbaae09490d6faf 100644 (file)
@@ -62,6 +62,7 @@ struct tpacket_auxdata {
        __u16           tp_mac;
        __u16           tp_net;
        __u16           tp_vlan_tci;
+       __u16           tp_padding;
 };
 
 /* Rx ring - header status */
@@ -70,6 +71,7 @@ struct tpacket_auxdata {
 #define TP_STATUS_COPY         0x2
 #define TP_STATUS_LOSING       0x4
 #define TP_STATUS_CSUMNOTREADY 0x8
+#define TP_STATUS_VLAN_VALID   0x10 /* auxdata has valid tp_vlan_tci */
 
 /* Tx ring - header status */
 #define TP_STATUS_AVAILABLE    0x0
@@ -100,6 +102,7 @@ struct tpacket2_hdr {
        __u32           tp_sec;
        __u32           tp_nsec;
        __u16           tp_vlan_tci;
+       __u16           tp_padding;
 };
 
 #define TPACKET2_HDRLEN                (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
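The explicit tp_padding keeps the layout stable, and TP_STATUS_VLAN_VALID lets userspace tell "tag with TCI 0" apart from "no tag at all". A user-space sketch of consuming the auxdata delivered via the PACKET_AUXDATA control message (error handling and the recvmsg()/cmsg plumbing are omitted):

	#include <stdio.h>
	#include <linux/if_packet.h>

	/* called for each SOL_PACKET/PACKET_AUXDATA cmsg payload */
	static void example_handle_aux(const struct tpacket_auxdata *aux)
	{
		if (aux->tp_status & TP_STATUS_VLAN_VALID)
			printf("VLAN TCI 0x%04x\n", aux->tp_vlan_tci);
		else
			printf("untagged frame\n");
	}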
index dc01681fbb42d5dc689d58c239d6b20a1ae7992f..affa27380b72e6096a060bf59c8f5261dfb03b1c 100644 (file)
@@ -225,7 +225,7 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
 }
 
 /**
- * __vlan_put_tag - regular VLAN tag inserting
+ * vlan_insert_tag - regular VLAN tag inserting
  * @skb: skbuff to tag
  * @vlan_tci: VLAN TCI to insert
  *
@@ -234,8 +234,10 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
  *
  * Following the skb_unshare() example, in case of error, the calling function
  * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
  */
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
 {
        struct vlan_ethhdr *veth;
 
@@ -255,8 +257,25 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
        /* now, the TCI */
        veth->h_vlan_TCI = htons(vlan_tci);
 
-       skb->protocol = htons(ETH_P_8021Q);
+       return skb;
+}
 
+/**
+ * __vlan_put_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+       skb = vlan_insert_tag(skb, vlan_tci);
+       if (skb)
+               skb->protocol = htons(ETH_P_8021Q);
        return skb;
 }
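The split lets receive-path code put a stripped tag back in-band without disturbing skb->protocol, while transmit paths keep the historical __vlan_put_tag() behaviour. A minimal sketch contrasting the two (hypothetical helper; both calls free the skb and return NULL on allocation failure, as documented above):

	#include <linux/if_vlan.h>

	static struct sk_buff *example_tag(struct sk_buff *skb, u16 tci, bool rx)
	{
		if (rx)
			/* receive side: tag goes back in-band, protocol field untouched */
			return vlan_insert_tag(skb, tci);
		/* transmit side: tag inserted and skb->protocol set to ETH_P_8021Q */
		return __vlan_put_tag(skb, tci);
	}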
 
index 6c12989839d9093f970b9f90d8dffe44dec6f7a2..f6efed0039edfdb06cbe1430588e51bf2db07aca 100644 (file)
@@ -414,6 +414,7 @@ enum
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
+       RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
        NR_SOFTIRQS
 };
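RCU_SOFTIRQ is kept last so RCU core work runs after the other softirq classes; it pairs with the kernel/rcutree.c changes later in this commit that move callback processing back out of the per-CPU kthreads. Roughly how such a vector is wired up, following the pattern of the existing entries (the actual call sites live in kernel/rcutree.c):

	/* registration, done once during RCU init */
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/* later, from interrupt or scheduler context, to kick RCU core processing */
	raise_softirq(RCU_SOFTIRQ);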
index 819acaaac3f5877bc4802bbbc928a5c21686fde2..714ba08dc09265922fe8ab81dee1caadc12a9100 100644 (file)
@@ -8,9 +8,9 @@
  * @IRQ_WAKE_THREAD    handler requests to wake the handler thread
  */
 enum irqreturn {
-       IRQ_NONE,
-       IRQ_HANDLED,
-       IRQ_WAKE_THREAD,
+       IRQ_NONE                = (0 << 0),
+       IRQ_HANDLED             = (1 << 0),
+       IRQ_WAKE_THREAD         = (1 << 1),
 };
 
 typedef enum irqreturn irqreturn_t;
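Giving the values distinct bits means anything other than IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD or their combination can be flagged as bogus (see the kernel/irq/spurious.c change further down), and a threaded handler's own return value can now be propagated. A minimal primary/threaded pair under this convention (hypothetical driver code):

	#include <linux/interrupt.h>

	static irqreturn_t example_hardirq(int irq, void *dev_id)
	{
		/* quick ack in hard-IRQ context, defer the real work to the thread */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread(int irq, void *dev_id)
	{
		/* heavier, sleepable work runs here */
		return IRQ_HANDLED;
	}

	/* registration, e.g. from probe():
	 *	request_threaded_irq(irq, example_hardirq, example_thread,
	 *			     0, "example", dev);
	 */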
index fb0e7329fee1a5ad370b2e5284ceb681670c7395..953352a88336c0385e2e385f93e41c278513be7b 100644 (file)
@@ -671,8 +671,8 @@ struct sysinfo {
 
 #ifdef __CHECKER__
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)
-#define BUILD_BUG_ON_ZERO(e)
-#define BUILD_BUG_ON_NULL(e)
+#define BUILD_BUG_ON_ZERO(e) (0)
+#define BUILD_BUG_ON_NULL(e) ((void*)0)
 #define BUILD_BUG_ON(condition)
 #else /* __CHECKER__ */
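Under sparse (__CHECKER__) these two macros used to expand to nothing, which breaks callers that embed them in expressions; expanding to (0) / ((void *)0) keeps those expressions parseable while still contributing nothing. A sketch of the expression-context idiom this protects (illustrative macro, modelled on the kernel's must-be-an-array trick):

	/* folds a compile-time "must be an array" check into a size expression;
	 * if BUILD_BUG_ON_ZERO() expanded to nothing, the "+" below would lose
	 * its right-hand operand under __CHECKER__ */
	#define EXAMPLE_ARRAY_SIZE(arr)						\
		(sizeof(arr) / sizeof((arr)[0]) +				\
		 BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(arr),	\
								typeof(&(arr)[0]))))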
 
index d4a5c84c503d7307a577474bc0eb4d75cb7265f3..0da38cf7db7bddc8841d14620a9e831866afcfd2 100644 (file)
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
 #endif
 
 
-struct key;
+struct cred;
 struct file;
 
 enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
        char **envp;
        enum umh_wait wait;
        int retval;
-       int (*init)(struct subprocess_info *info);
+       int (*init)(struct subprocess_info *info, struct cred *new);
        void (*cleanup)(struct subprocess_info *info);
        void *data;
 };
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 
 /* Set various pieces of state into the subprocess_info structure */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data);
 
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
 static inline int
 call_usermodehelper_fns(char *path, char **argv, char **envp,
                        enum umh_wait wait,
-                       int (*init)(struct subprocess_info *info),
+                       int (*init)(struct subprocess_info *info, struct cred *new),
                        void (*cleanup)(struct subprocess_info *), void *data)
 {
        struct subprocess_info *info;
index 2a0d7d651dc34f98daff0d111c5e458339006945..ee0c952188de2c99281fd7567f701893e90a47dd 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef _LINUX_KMSG_DUMP_H
 #define _LINUX_KMSG_DUMP_H
 
+#include <linux/errno.h>
 #include <linux/list.h>
 
 enum kmsg_dump_reason {
index 82cb5bf461fb4ace0c8fa5dc8e68c44a0e5fca19..f66b065a8b5ff1af1efdc698aadc248a79175917 100644 (file)
@@ -32,15 +32,17 @@ enum kobj_ns_type {
 
 /*
  * Callbacks so sysfs can determine namespaces
- *   @current_ns: return calling task's namespace
+ *   @grab_current_ns: return a new reference to calling task's namespace
  *   @netlink_ns: return namespace to which a sock belongs (right?)
  *   @initial_ns: return the initial namespace (i.e. init_net_ns)
+ *   @drop_ns: drops a reference to namespace
  */
 struct kobj_ns_type_operations {
        enum kobj_ns_type type;
-       const void *(*current_ns)(void);
+       void *(*grab_current_ns)(void);
        const void *(*netlink_ns)(struct sock *sk);
        const void *(*initial_ns)(void);
+       void (*drop_ns)(void *);
 };
 
 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
@@ -48,9 +50,9 @@ int kobj_ns_type_registered(enum kobj_ns_type type);
 const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
 const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
 
-const void *kobj_ns_current(enum kobj_ns_type type);
+void *kobj_ns_grab_current(enum kobj_ns_type type);
 const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
 const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+void kobj_ns_drop(enum kobj_ns_type type, void *ns);
 
 #endif /* _LINUX_KOBJECT_NS_H */
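The rename from current_ns/exit_ns to grab_current_ns/drop_ns reflects that the namespace tag is now handed out as a counted reference instead of being borrowed. A sketch of the intended pairing (the caller and the use of the net-namespace tag type are illustrative):

	/* take a counted reference on the calling task's namespace tag ... */
	void *tag = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);

	/* ... use it, e.g. as the sysfs directory tag of a namespaced object ... */

	/* ... and release it once the object is gone */
	kobj_ns_drop(KOBJ_NS_TYPE_NET, tag);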
index 9724a38ee69d5c9e292aa75c8fcc092486ebdbe7..50940da6adf36d7a544c32448b6873f83cef2224 100644 (file)
@@ -84,6 +84,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
@@ -246,6 +247,11 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
        return NULL;
 }
 
+static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+{
+       return NULL;
+}
+
 static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
        return 1;
index ca333e79e10ff92e7fbc83a4908d2d54ccf514b0..54b8b4d7b68f1a2a42f68de4128f563f9f86eb4e 100644 (file)
@@ -2555,7 +2555,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
+extern const char *netdev_drivername(const struct net_device *dev);
 
 extern void linkwatch_run_queue(void);
 
index 50cdc2559a5aa05a10e4d1115c48da7185a5e7bd..0d3dd66322ecbb24529303f6634f36e5ce6f390d 100644 (file)
@@ -18,6 +18,9 @@ enum ip_conntrack_info {
        /* >= this indicates reply direction */
        IP_CT_IS_REPLY,
 
+       IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
+       IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
+       IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,   
        /* Number of distinct IP_CT types (no NEW in reply dirn). */
        IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
 };
index 8b97308e65df3ccf094d79af23266e2da5fc73af..9ca008f0c542935440f3c39c0a5fb070dd6f5e8c 100644 (file)
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
  * Special handling for cmpxchg_double.  cmpxchg_double is passed two
  * percpu variables.  The first has to be aligned to a double word
  * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
  */
 #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)          \
 ({                                                                     \
index 2a8621c4be1e36d6e713f1b30e51cc607f8b4e02..a837b20ba190330c23b59ec5a3fb584fc9f6308f 100644 (file)
@@ -1063,6 +1063,7 @@ struct sched_domain;
  */
 #define WF_SYNC                0x01            /* waker goes to sleep after wakup */
 #define WF_FORK                0x02            /* child wakeup after fork */
+#define WF_MIGRATED    0x04            /* internal use, task got migrated */
 
 #define ENQUEUE_WAKEUP         1
 #define ENQUEUE_HEAD           2
index e9811892844f1cacca117fae323abef084ab615f..c6db9fb33c448f28197ffb6d135689daf58625b6 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <asm/processor.h>
 
 typedef struct {
        unsigned sequence;
index e8b78ce144741fda866f79bd32ce3c96ba0c4096..c0a4f3ab0cc047490eb9eaf20d9cf577cc5c2d65 100644 (file)
@@ -1256,6 +1256,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
        skb->tail += len;
 }
 
+static inline void skb_reset_mac_len(struct sk_buff *skb)
+{
+       skb->mac_len = skb->network_header - skb->mac_header;
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
 {
index 7ad824d510a2138bca6b7e3d1ec8aae4c0ac014a..8cc38d3bab0c57a79f68fe53e4915f9393335cba 100644 (file)
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
 void ipi_call_lock(void);
 void ipi_call_unlock(void);
 void ipi_call_lock_irq(void);
 void ipi_call_unlock_irq(void);
+#else
+static inline void call_function_init(void) { }
 #endif
 
 /*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()                 do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
-static inline void init_call_single_data(void) { }
+static inline void call_function_init(void) { }
 
 static inline int
 smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
index 384eb5fe530b8e90c44c9f65ebb719f965cf65df..e70564647039fdcbdf5516aa9f8a9ed1cffd4d6d 100644 (file)
@@ -358,6 +358,7 @@ struct backing_dev_info;
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
 extern void __put_swap_token(struct mm_struct *);
+extern void disable_swap_token(struct mem_cgroup *memcg);
 
 static inline int has_swap_token(struct mm_struct *mm)
 {
@@ -370,11 +371,6 @@ static inline void put_swap_token(struct mm_struct *mm)
                __put_swap_token(mm);
 }
 
-static inline void disable_swap_token(void)
-{
-       put_swap_token(swap_token_mm);
-}
-
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 extern void
 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -500,7 +496,7 @@ static inline int has_swap_token(struct mm_struct *mm)
        return 0;
 }
 
-static inline void disable_swap_token(void)
+static inline void disable_swap_token(struct mem_cgroup *memcg)
 {
 }
 
index 8c0e349f4a6cb83f616f1d0046c1db7ab504d531..445702c60d0468c38a14aa0624e3bb1c1df29a5c 100644 (file)
@@ -24,6 +24,7 @@ extern int swiotlb_force;
 
 extern void swiotlb_init(int verbose);
 extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+extern unsigned long swioltb_nr_tbl(void);
 
 /*
  * Enumeration for sync targets
index c3acda60eee0819f9735e059b925ed739bf5831e..e2696d76a59956eeea4bb3431fddc3b7ecbc4780 100644 (file)
@@ -177,9 +177,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
 struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
 void sysfs_put(struct sysfs_dirent *sd);
 
-/* Called to clear a ns tag when it is no longer valid */
-void sysfs_exit_ns(enum kobj_ns_type type, const void *tag);
-
 int __must_check sysfs_init(void);
 
 #else /* CONFIG_SYSFS */
@@ -338,10 +335,6 @@ static inline void sysfs_put(struct sysfs_dirent *sd)
 {
 }
 
-static inline void sysfs_exit_ns(int type, const void *tag)
-{
-}
-
 static inline int __must_check sysfs_init(void)
 {
        return 0;
index b91a40e847d236d9046dc3154a7e7a58ea594776..fc839bfa7935aff66380a22e9b95589127ec66d2 100644 (file)
@@ -60,7 +60,7 @@ int arch_update_cpu_topology(void);
  * (in whatever arch specific measurement units returned by node_distance())
  * then switch on zone reclaim on boot.
  */
-#define RECLAIM_DISTANCE 20
+#define RECLAIM_DISTANCE 30
 #endif
 #ifndef PENALTY_FOR_NODE_WITH_CPUS
 #define PENALTY_FOR_NODE_WITH_CPUS     (1)
index 5b07792ccb46cb62280438159f3f6e3f00de573d..ff7dc08696a8cc67c812ea516e9bcd9fee9c53c7 100644 (file)
@@ -76,7 +76,7 @@
  *     tty device.  It is solely the responsibility of the line
  *     discipline to handle poll requests.
  *
- * unsigned int (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ * void        (*receive_buf)(struct tty_struct *, const unsigned char *cp,
  *                    char *fp, int count);
  *
  *     This function is called by the low-level tty driver to send
@@ -84,8 +84,7 @@
  *     processing.  <cp> is a pointer to the buffer of input
  *     character received by the device.  <fp> is a pointer to a
  *     pointer of flag bytes which indicate whether a character was
- *     received with a parity error, etc. Returns the amount of bytes
- *     received.
+ *     received with a parity error, etc.
  * 
  * void        (*write_wakeup)(struct tty_struct *);
  *
@@ -141,8 +140,8 @@ struct tty_ldisc_ops {
        /*
         * The following routines are called from below.
         */
-       unsigned int (*receive_buf)(struct tty_struct *,
-                       const unsigned char *cp, char *fp, int count);
+       void    (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+                              char *fp, int count);
        void    (*write_wakeup)(struct tty_struct *);
        void    (*dcd_change)(struct tty_struct *, unsigned int,
                                struct pps_event_time *);
index 71693d4a4fe187db6da029ec670ae124b90a3ef6..17df3600bcef74bf674b9d13c9db5657e64cc558 100644 (file)
@@ -62,7 +62,9 @@
        US_FLAG(NO_READ_DISC_INFO,      0x00040000)             \
                /* cannot handle READ_DISC_INFO */              \
        US_FLAG(NO_READ_CAPACITY_16,    0x00080000)             \
-               /* cannot handle READ_CAPACITY_16 */
+               /* cannot handle READ_CAPACITY_16 */            \
+       US_FLAG(INITIAL_READ10, 0x00100000)                     \
+               /* Initial READ(10) (and others) must be retried */
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 73eb1ed36ec4cdb19b92a33b146b632ac9378f38..6ddbd86377dec3375549f2f02ef5c7c47fa9bfb3 100644 (file)
@@ -9,7 +9,7 @@
 #endif
 
 #ifndef UTS_NODENAME
-#define UTS_NODENAME "(none)"  /* set by sethostname() */
+#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */
 #endif
 
 #ifndef UTS_DOMAINNAME
index 93e96fb934526e44b327e8d677bb91f7accd80c4..c7c40f1d2624a7eb7988ee376e324904b54c1d39 100644 (file)
@@ -128,8 +128,8 @@ struct video_device
        struct mutex *lock;
 };
 
-#define media_entity_to_video_device(entity) \
-       container_of(entity, struct video_device, entity)
+#define media_entity_to_video_device(__e) \
+       container_of(__e, struct video_device, entity)
 /* dev to video-device */
 #define to_video_device(cd) container_of(cd, struct video_device, dev)
 
index 2bf9ed9ef26b266a583550238e8433861d218c80..aef430d779bdd07542f5be64967c8f5c8fb9d809 100644 (file)
@@ -35,8 +35,11 @@ struct netns_ipvs;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
+       atomic_t                passive;        /* To decided when the network
+                                                * namespace should be freed.
+                                                */
        atomic_t                count;          /* To decided when the network
-                                                *  namespace should be freed.
+                                                *  namespace should be shut down.
                                                 */
 #ifdef NETNS_REFCNT_DEBUG
        atomic_t                use_count;      /* To track references we
@@ -154,6 +157,9 @@ int net_eq(const struct net *net1, const struct net *net2)
 {
        return net1 == net2;
 }
+
+extern void net_drop_ns(void *);
+
 #else
 
 static inline struct net *get_net(struct net *net)
@@ -175,6 +181,8 @@ int net_eq(const struct net *net1, const struct net *net2)
 {
        return 1;
 }
+
+#define net_drop_ns NULL
 #endif
 
 
index 2b447646ce4bf3d64926d4c8a7bdd969f1a8bb1d..dd6847e5d6e46264ffe6db00ccb7128ce61483fa 100644 (file)
@@ -107,6 +107,7 @@ typedef enum {
        SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
        SCTP_CMD_SEND_MSG,       /* Send the whole use message */
        SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
+       SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
        SCTP_CMD_LAST
 } sctp_verb_t;
 
index 795f4886e1112dc7245e841f29f0465ae4aa1d59..7df327a6d564e8b2b57609ff714d4eb9a0e8b872 100644 (file)
@@ -1993,7 +1993,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
                                        const struct sctp_association *asoc,
                                        __be32 serial);
-
+void sctp_asconf_queue_teardown(struct sctp_association *asoc);
 
 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
                        const union sctp_addr *ss2);
index ae045ca7d356033d49c8a9650a618c206503f68c..1c09820df58564f8d0430d996998edc6f8d893c0 100644 (file)
@@ -20,7 +20,8 @@ struct softirq_action;
                         softirq_name(BLOCK_IOPOLL),    \
                         softirq_name(TASKLET),         \
                         softirq_name(SCHED),           \
-                        softirq_name(HRTIMER))
+                        softirq_name(HRTIMER),         \
+                        softirq_name(RCU))
 
 /**
  * irq_handler_entry - called immediately before the irq action handler
index 5f247f5ffc565c50e49f70eb84f52b101d3c03f0..f99645d05a8f201eea78a399468e51b3876d2019 100644 (file)
 TRACE_EVENT(net_dev_xmit,
 
        TP_PROTO(struct sk_buff *skb,
-                int rc),
+                int rc,
+                struct net_device *dev,
+                unsigned int skb_len),
 
-       TP_ARGS(skb, rc),
+       TP_ARGS(skb, rc, dev, skb_len),
 
        TP_STRUCT__entry(
                __field(        void *,         skbaddr         )
                __field(        unsigned int,   len             )
                __field(        int,            rc              )
-               __string(       name,           skb->dev->name  )
+               __string(       name,           dev->name       )
        ),
 
        TP_fast_assign(
                __entry->skbaddr = skb;
-               __entry->len = skb->len;
+               __entry->len = skb_len;
                __entry->rc = rc;
-               __assign_str(name, skb->dev->name);
+               __assign_str(name, dev->name);
        ),
 
        TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
index ea422aaa23e12a2397e08a167e36dd65ce7041cf..b2c33bd955faa4536082e62e6c9f17946994afde 100644 (file)
@@ -6,6 +6,8 @@
 
 #include <linux/types.h>
 #include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
 #include "gfpflags.h"
 
 #define RECLAIM_WB_ANON                0x0001u
@@ -310,6 +312,87 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
                show_reclaim_flags(__entry->reclaim_flags))
 );
 
+TRACE_EVENT(replace_swap_token,
+       TP_PROTO(struct mm_struct *old_mm,
+                struct mm_struct *new_mm),
+
+       TP_ARGS(old_mm, new_mm),
+
+       TP_STRUCT__entry(
+               __field(struct mm_struct*,      old_mm)
+               __field(unsigned int,           old_prio)
+               __field(struct mm_struct*,      new_mm)
+               __field(unsigned int,           new_prio)
+       ),
+
+       TP_fast_assign(
+               __entry->old_mm   = old_mm;
+               __entry->old_prio = old_mm ? old_mm->token_priority : 0;
+               __entry->new_mm   = new_mm;
+               __entry->new_prio = new_mm->token_priority;
+       ),
+
+       TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
+                 __entry->old_mm, __entry->old_prio,
+                 __entry->new_mm, __entry->new_prio)
+);
+
+DECLARE_EVENT_CLASS(put_swap_token_template,
+       TP_PROTO(struct mm_struct *swap_token_mm),
+
+       TP_ARGS(swap_token_mm),
+
+       TP_STRUCT__entry(
+               __field(struct mm_struct*, swap_token_mm)
+       ),
+
+       TP_fast_assign(
+               __entry->swap_token_mm = swap_token_mm;
+       ),
+
+       TP_printk("token_mm=%p", __entry->swap_token_mm)
+);
+
+DEFINE_EVENT(put_swap_token_template, put_swap_token,
+       TP_PROTO(struct mm_struct *swap_token_mm),
+       TP_ARGS(swap_token_mm)
+);
+
+DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
+       TP_PROTO(struct mm_struct *swap_token_mm),
+       TP_ARGS(swap_token_mm),
+       TP_CONDITION(swap_token_mm != NULL)
+);
+
+TRACE_EVENT_CONDITION(update_swap_token_priority,
+       TP_PROTO(struct mm_struct *mm,
+                unsigned int old_prio,
+                struct mm_struct *swap_token_mm),
+
+       TP_ARGS(mm, old_prio, swap_token_mm),
+
+       TP_CONDITION(mm->token_priority != old_prio),
+
+       TP_STRUCT__entry(
+               __field(struct mm_struct*, mm)
+               __field(unsigned int, old_prio)
+               __field(unsigned int, new_prio)
+               __field(struct mm_struct*, swap_token_mm)
+               __field(unsigned int, swap_token_prio)
+       ),
+
+       TP_fast_assign(
+               __entry->mm             = mm;
+               __entry->old_prio       = old_prio;
+               __entry->new_prio       = mm->token_priority;
+               __entry->swap_token_mm  = swap_token_mm;
+               __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
+       ),
+
+       TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
+                 __entry->mm, __entry->old_prio, __entry->new_prio,
+                 __entry->swap_token_mm, __entry->swap_token_prio)
+);
 
 #endif /* _TRACE_VMSCAN_H */
 
index ebafac4231eeff15883809419e6df0c211b90e1a..412c21b00d513f81269d7860b1a9af1909e0225f 100644 (file)
@@ -19,7 +19,6 @@ config DEFCONFIG_LIST
 config CONSTRUCTORS
        bool
        depends on !UML
-       default y
 
 config HAVE_IRQ_WORK
        bool
@@ -204,6 +203,15 @@ config KERNEL_LZO
 
 endchoice
 
+config DEFAULT_HOSTNAME
+       string "Default hostname"
+       default "(none)"
+       help
+         This option determines the default system hostname before userspace
+         calls sethostname(2). The kernel traditionally uses "(none)" here,
+         but you may wish to use a different default here to make a minimal
+         system more usable with less configuration.
+
 config SWAP
        bool "Support for paging of anonymous memory (swap)"
        depends on MMU && BLOCK
index cfd7000c9d7108085b337876893fa6e91910dc04..2568d22a304ecc667b302b0b5d17956ed6d143a8 100644 (file)
@@ -93,9 +93,6 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
                 * If the upper limit and lower limit of the timer_rate is
                 * >= 12.5% apart, redo calibration.
                 */
-               printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
-                           "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
-                         timer_rate_max, timer_rate_min, pre_start, pre_end);
                if (start >= post_end)
                        printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
                                        "timer_rate as we had a TSC wrap around"
index cafba67c13bf8b57493e49b0501ef4265e76204b..d7211faed2adfb295caf46bbb9c70835f622eabe 100644 (file)
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
        timekeeping_init();
        time_init();
        profile_init();
+       call_function_init();
        if (!irqs_disabled())
                printk(KERN_CRIT "start_kernel(): bug: interrupts were "
                                 "enabled early\n");
index 20a406471525af2087cf914d8569d401306b3a5b..f2b321bae44037c08d4b09b90b8610a80b979eb7 100644 (file)
@@ -561,29 +561,28 @@ void exit_files(struct task_struct *tsk)
 
 #ifdef CONFIG_MM_OWNER
 /*
- * Task p is exiting and it owned mm, lets find a new owner for it
+ * A task is exiting.   If it owned this mm, find a new owner for the mm.
  */
-static inline int
-mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
-{
-       /*
-        * If there are other users of the mm and the owner (us) is exiting
-        * we need to find a new owner to take on the responsibility.
-        */
-       if (atomic_read(&mm->mm_users) <= 1)
-               return 0;
-       if (mm->owner != p)
-               return 0;
-       return 1;
-}
-
 void mm_update_next_owner(struct mm_struct *mm)
 {
        struct task_struct *c, *g, *p = current;
 
 retry:
-       if (!mm_need_new_owner(mm, p))
+       /*
+        * If the exiting or execing task is not the owner, it's
+        * someone else's problem.
+        */
+       if (mm->owner != p)
                return;
+       /*
+        * The current owner is exiting/execing and there are no other
+        * candidates.  Do not leave the mm pointing to a possibly
+        * freed task structure.
+        */
+       if (atomic_read(&mm->mm_users) <= 1) {
+               mm->owner = NULL;
+               return;
+       }
 
        read_lock(&tasklist_lock);
        /*
index b8cadf70b1fbd109c4ca56f78f6f9a685e2f9974..5bf924d80b5c64be2888be0afd65008260e8db01 100644 (file)
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
 
 config GCOV_KERNEL
        bool "Enable gcov-based kernel profiling"
-       depends on DEBUG_FS && CONSTRUCTORS
+       depends on DEBUG_FS
+       select CONSTRUCTORS
        default n
        ---help---
        This option enables gcov-based code profiling (e.g. for code coverage
index 90cb55f6d7ebe4496b339f5202f0a31926854963..470d08c82bbe29f373a9aa79d07794d56985763b 100644 (file)
@@ -132,12 +132,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
                switch (res) {
                case IRQ_WAKE_THREAD:
-                       /*
-                        * Set result to handled so the spurious check
-                        * does not trigger.
-                        */
-                       res = IRQ_HANDLED;
-
                        /*
                         * Catch drivers which return WAKE_THREAD but
                         * did not set up a thread function
index 886e80347b322795b56630857fd98996545ea5a4..4c60a50e66b237922381c5b0d0d82d55b436b6dc 100644 (file)
@@ -257,13 +257,11 @@ int __init early_irq_init(void)
        count = ARRAY_SIZE(irq_desc);
 
        for (i = 0; i < count; i++) {
-               desc[i].irq_data.irq = i;
-               desc[i].irq_data.chip = &no_irq_chip;
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
-               irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
-               alloc_masks(desc + i, GFP_KERNEL, node);
-               desc_smp_init(desc + i, node);
+               alloc_masks(&desc[i], GFP_KERNEL, node);
+               raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               desc_set_defaults(i, &desc[i], node);
        }
        return arch_early_irq_init();
 }
@@ -346,6 +344,12 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
        if (!cnt)
                return -EINVAL;
 
+       if (irq >= 0) {
+               if (from > irq)
+                       return -EINVAL;
+               from = irq;
+       }
+
        mutex_lock(&sparse_irq_lock);
 
        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
index f7ce0021e1c48eb02e2eb6531265ac84bfc24d96..0a7840aeb0fb9efbc18a6e6e8e6f01de17ed91cd 100644 (file)
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;
 
+       if (!desc)
+               return -EINVAL;
+
        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
@@ -723,13 +726,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+       irqreturn_t ret;
+
        local_bh_disable();
-       action->thread_fn(action->irq, action->dev_id);
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
+       return ret;
 }
 
 /*
@@ -737,10 +743,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+               struct irqaction *action)
 {
-       action->thread_fn(action->irq, action->dev_id);
+       irqreturn_t ret;
+
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
+       return ret;
 }
 
 /*
@@ -753,7 +763,8 @@ static int irq_thread(void *data)
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
-       void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+       irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                       struct irqaction *action);
        int wake;
 
        if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +794,12 @@ static int irq_thread(void *data)
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
+                       irqreturn_t action_ret;
+
                        raw_spin_unlock_irq(&desc->lock);
-                       handler_fn(desc, action);
+                       action_ret = handler_fn(desc, action);
+                       if (!noirqdebug)
+                               note_interrupt(action->irq, desc, action_ret);
                }
 
                wake = atomic_dec_and_test(&desc->threads_active);
index dfbd550401b28f29a4db69807fae318c8ec7341a..aa57d5da18c1de65e807098702ad9b841fa3788d 100644 (file)
@@ -167,6 +167,13 @@ out:
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+       if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+               return 0;
+       return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
        struct irqaction *action;
        unsigned long flags;
 
-       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+       if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
@@ -201,10 +208,11 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
-               printk(KERN_ERR "[<%p>]", action->handler);
-               print_symbol(" (%s)",
-                       (unsigned long)action->handler);
-               printk("\n");
+               printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+               if (action->thread_fn)
+                       printk(KERN_CONT " threaded [<%p>] %pf",
+                                       action->thread_fn, action->thread_fn);
+               printk(KERN_CONT "\n");
                action = action->next;
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -262,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;
 
-       if (unlikely(action_ret != IRQ_HANDLED)) {
+       /* we get here again via the threaded handler */
+       if (action_ret == IRQ_WAKE_THREAD)
+               return;
+
+       if (bad_action_ret(action_ret)) {
+               report_bad_irq(irq, desc, action_ret);
+               return;
+       }
+
+       if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
@@ -274,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
-               if (unlikely(action_ret != IRQ_NONE))
-                       report_bad_irq(irq, desc, action_ret);
        }
 
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
index ad6a81c58b44e2d5ae57c5dbe866da1cf1008a03..47613dfb7b28c340825493ed36a4c3347a39290d 100644 (file)
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
         */
        set_user_nice(current, 0);
 
-       if (sub_info->init) {
-               retval = sub_info->init(sub_info);
-               if (retval)
-                       goto fail;
-       }
-
        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);
 
+       if (sub_info->init) {
+               retval = sub_info->init(sub_info, new);
+               if (retval) {
+                       abort_creds(new);
+                       goto fail;
+               }
+       }
+
        commit_creds(new);
 
        retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * context in which call_usermodehelper_exec is called.
  */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data)
 {
index 63437d065ac89d69bbc6e70fd3789e64ac7e1d6a..298c9276dfdb378877249384db860c3658485ef2 100644 (file)
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
        int ret = 0;
 
        if (unlikely(current->lockdep_recursion))
-               return ret;
+               return 1; /* avoid false negative lockdep_assert_held() */
 
        raw_local_irq_save(flags);
        check_flags(flags);
index 89419ff92e996c1e52fade38475ba14604a8f7bd..7e59ffb3d0ba487c0474270a476b25cbc96d2ac1 100644 (file)
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1     /* RT priority for per-CPU kthreads. */
 
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
        int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp;
-       struct task_struct *t;
 
-       /* Stop the CPU's kthread. */
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t != NULL) {
-               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-               kthread_stop(t);
-       }
+       rcu_stop_cpu_kthread(cpu);
 
        /* Exclude any attempts to start a new grace period. */
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Re-raise the RCU softirq if there are callbacks remaining. */
        if (cpu_has_callbacks_ready_to_invoke(rdp))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 /*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
        }
        rcu_preempt_check_callbacks(cpu);
        if (rcu_pending(cpu))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* If there are callbacks ready, invoke them. */
-       rcu_do_batch(rsp, rdp);
+       if (cpu_has_callbacks_ready_to_invoke(rdp))
+               invoke_rcu_callbacks(rsp, rdp);
 }
 
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
        __rcu_process_callbacks(&rcu_sched_state,
                                &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void invoke_rcu_cpu_kthread(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-       local_irq_restore(flags);
-}
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-
-       t = rnp->node_kthread_task;
-       if (t != NULL)
-               wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-       int policy;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t == NULL)
-               return;
-       if (to_rt) {
-               policy = SCHED_FIFO;
-               sp.sched_priority = RCU_KTHREAD_PRIO;
-       } else {
-               policy = SCHED_NORMAL;
-               sp.sched_priority = 0;
-       }
-       sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-       struct rcu_node *rnp = rdp->mynode;
-
-       atomic_or(rdp->grpmask, &rnp->wakemask);
-       invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-       struct sched_param sp;
-       struct timer_list yield_timer;
-
-       setup_timer_on_stack(&yield_timer, f, arg);
-       mod_timer(&yield_timer, jiffies + 2);
-       sp.sched_priority = 0;
-       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-       set_user_nice(current, 19);
-       schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-       del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-       while (cpu_is_offline(cpu) ||
-              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-              smp_processor_id() != cpu) {
-               if (kthread_should_stop())
-                       return 1;
-               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-               local_bh_enable();
-               schedule_timeout_uninterruptible(1);
-               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
-               local_bh_disable();
-       }
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
-       int cpu = (int)(long)arg;
-       unsigned long flags;
-       int spincnt = 0;
-       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       char work;
-       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
-       for (;;) {
-               *statusp = RCU_KTHREAD_WAITING;
-               rcu_wait(*workp != 0 || kthread_should_stop());
-               local_bh_disable();
-               if (rcu_cpu_kthread_should_stop(cpu)) {
-                       local_bh_enable();
-                       break;
-               }
-               *statusp = RCU_KTHREAD_RUNNING;
-               per_cpu(rcu_cpu_kthread_loops, cpu)++;
-               local_irq_save(flags);
-               work = *workp;
-               *workp = 0;
-               local_irq_restore(flags);
-               if (work)
-                       rcu_process_callbacks();
-               local_bh_enable();
-               if (*workp != 0)
-                       spincnt++;
-               else
-                       spincnt = 0;
-               if (spincnt > 10) {
-                       *statusp = RCU_KTHREAD_YIELDING;
-                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-                       spincnt = 0;
-               }
-       }
-       *statusp = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-               return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
-       kthread_bind(t, cpu);
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-       per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long mask;
-       struct rcu_node *rnp = (struct rcu_node *)arg;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       for (;;) {
-               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               rcu_wait(atomic_read(&rnp->wakemask) != 0);
-               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               mask = atomic_xchg(&rnp->wakemask, 0);
-               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                       if ((mask & 0x1) == 0)
-                               continue;
-                       preempt_disable();
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (!cpu_online(cpu) || t == NULL) {
-                               preempt_enable();
-                               continue;
-                       }
-                       per_cpu(rcu_cpu_has_work, cpu) = 1;
-                       sp.sched_priority = RCU_KTHREAD_PRIO;
-                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                       preempt_enable();
-               }
-       }
-       /* NOTREACHED */
-       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-       cpumask_var_t cm;
-       int cpu;
-       unsigned long mask = rnp->qsmaskinit;
-
-       if (rnp->node_kthread_task == NULL)
-               return;
-       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+       if (likely(!rsp->boost)) {
+               rcu_do_batch(rsp, rdp);
                return;
-       cpumask_clear(cm);
-       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-               if ((mask & 0x1) && cpu != outgoingcpu)
-                       cpumask_set_cpu(cpu, cm);
-       if (cpumask_weight(cm) == 0) {
-               cpumask_setall(cm);
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                       cpumask_clear_cpu(cpu, cm);
-               WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
-       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-       rcu_boost_kthread_setaffinity(rnp, cm);
-       free_cpumask_var(cm);
+       invoke_rcu_callbacks_kthread();
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                               struct rcu_node *rnp)
+static void invoke_rcu_core(void)
 {
-       unsigned long flags;
-       int rnp_index = rnp - &rsp->node[0];
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           rnp->qsmaskinit == 0)
-               return 0;
-       if (rnp->node_kthread_task == NULL) {
-               t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
-               if (IS_ERR(t))
-                       return PTR_ERR(t);
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               rnp->node_kthread_task = t;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               sp.sched_priority = 99;
-               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       }
-       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+       raise_softirq(RCU_SOFTIRQ);
 }
 
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-       int cpu;
-       struct rcu_node *rnp;
-       struct task_struct *t;
-
-       rcu_kthreads_spawnable = 1;
-       for_each_possible_cpu(cpu) {
-               per_cpu(rcu_cpu_has_work, cpu) = 0;
-               if (cpu_online(cpu)) {
-                       (void)rcu_spawn_one_cpu_kthread(cpu);
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (t)
-                               wake_up_process(t);
-               }
-       }
-       rnp = rcu_get_root(rcu_state);
-       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       if (rnp->node_kthread_task)
-               wake_up_process(rnp->node_kthread_task);
-       if (NUM_RCU_NODES > 1) {
-               rcu_for_each_leaf_node(rcu_state, rnp) {
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                       t = rnp->node_kthread_task;
-                       if (t)
-                               wake_up_process(t);
-                       rcu_wake_one_boost_kthread(rnp);
-               }
-       }
-       return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
           struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
        rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-
-       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
-               (void)rcu_spawn_one_cpu_kthread(cpu);
-               if (rnp->node_kthread_task == NULL)
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       }
-}
-
-/*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t)
-               wake_up_process(t);
-
-       t = rnp->node_kthread_task;
-       if (t)
-               wake_up_process(t);
-
-       rcu_wake_one_boost_kthread(rnp);
-}
-
 /*
  * Handle CPU online/offline notification events.
  */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                rcu_prepare_kthreads(cpu);
                break;
        case CPU_ONLINE:
-               rcu_online_kthreads(cpu);
        case CPU_DOWN_FAILED:
                rcu_node_kthread_setaffinity(rnp, -1);
                rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
        /*
         * We don't need protection against CPU-hotplug here because
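The hunk above is the heart of this change: rcu_process_callbacks() becomes a softirq handler again, invoke_rcu_core() simply raises RCU_SOFTIRQ, and invoke_rcu_callbacks() only defers to a kthread when the state is subject to priority boosting. A minimal userspace C sketch of that dispatch decision follows; the names (fake_state, do_batch, wake_cpu_kthread) are illustrative only and are not kernel symbols.

/* Illustrative only: models the "boost ? kthread : direct batch" decision
 * introduced above; none of these names exist in the kernel. */
#include <stdio.h>

struct fake_state {
    const char *name;
    int boost;                 /* mirrors rsp->boost: subject to priority boost */
};

static void do_batch(struct fake_state *sp)
{
    printf("%s: invoke callbacks directly in softirq context\n", sp->name);
}

static void wake_cpu_kthread(struct fake_state *sp)
{
    printf("%s: wake the per-CPU kthread instead\n", sp->name);
}

/* Same shape as the new invoke_rcu_callbacks(): the common case stays in softirq. */
static void invoke_callbacks(struct fake_state *sp)
{
    if (!sp->boost) {
        do_batch(sp);
        return;
    }
    wake_cpu_kthread(sp);
}

int main(void)
{
    struct fake_state sched   = { "rcu_sched",   0 };
    struct fake_state preempt = { "rcu_preempt", 1 };  /* boosted flavor */

    invoke_callbacks(&sched);
    invoke_callbacks(&preempt);
    return 0;
}
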
index 7b9a08b4aaea01d48eacb8781296a7be205fb898..01b2ccda26fbf82880cc1d4a50bf9f2877e91bba 100644 (file)
@@ -369,6 +369,7 @@ struct rcu_state {
                                                /*  period because */
                                                /*  force_quiescent_state() */
                                                /*  was running. */
+       u8      boost;                          /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
 
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
index c8bff3099a89eeeccf9e3d330a16f6c4d9a2c340..14dc7dd0090220f717f83f666fb2a30a32f66143 100644 (file)
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
                                &__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+       rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ -1248,6 +1257,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        }
 }
 
+/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+               local_irq_restore(flags);
+               return;
+       }
+       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       local_irq_restore(flags);
+}
+
 /*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
        if (&rcu_preempt_state != rsp)
                return 0;
+       rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
 {
-       if (rnp->boost_kthread_task)
-               wake_up_process(rnp->boost_kthread_task);
+       struct task_struct *t;
+
+       /* Stop the CPU's kthread. */
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t != NULL) {
+               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+               kthread_stop(t);
+       }
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
+{
+       rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+       rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+       rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+
+       t = rnp->node_kthread_task;
+       if (t != NULL)
+               wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument.  The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+       int policy;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t == NULL)
+               return;
+       if (to_rt) {
+               policy = SCHED_FIFO;
+               sp.sched_priority = RCU_KTHREAD_PRIO;
+       } else {
+               policy = SCHED_NORMAL;
+               sp.sched_priority = 0;
+       }
+       sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+       struct rcu_node *rnp = rdp->mynode;
+
+       atomic_or(rdp->grpmask, &rnp->wakemask);
+       invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted.  Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+       struct sched_param sp;
+       struct timer_list yield_timer;
+
+       setup_timer_on_stack(&yield_timer, f, arg);
+       mod_timer(&yield_timer, jiffies + 2);
+       sp.sched_priority = 0;
+       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+       set_user_nice(current, 19);
+       schedule();
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+       del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline.  We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh.  This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+       while (cpu_is_offline(cpu) ||
+              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+              smp_processor_id() != cpu) {
+               if (kthread_should_stop())
+                       return 1;
+               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+               local_bh_enable();
+               schedule_timeout_uninterruptible(1);
+               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+               local_bh_disable();
+       }
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+       int cpu = (int)(long)arg;
+       unsigned long flags;
+       int spincnt = 0;
+       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+       char work;
+       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+       for (;;) {
+               *statusp = RCU_KTHREAD_WAITING;
+               rcu_wait(*workp != 0 || kthread_should_stop());
+               local_bh_disable();
+               if (rcu_cpu_kthread_should_stop(cpu)) {
+                       local_bh_enable();
+                       break;
+               }
+               *statusp = RCU_KTHREAD_RUNNING;
+               per_cpu(rcu_cpu_kthread_loops, cpu)++;
+               local_irq_save(flags);
+               work = *workp;
+               *workp = 0;
+               local_irq_restore(flags);
+               if (work)
+                       rcu_kthread_do_work();
+               local_bh_enable();
+               if (*workp != 0)
+                       spincnt++;
+               else
+                       spincnt = 0;
+               if (spincnt > 10) {
+                       *statusp = RCU_KTHREAD_YIELDING;
+                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       spincnt = 0;
+               }
+       }
+       *statusp = RCU_KTHREAD_STOPPED;
+       return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task.  There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online.  We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online.  If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_kthreads_spawnable ||
+           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+               return 0;
+       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+       if (cpu_online(cpu))
+               kthread_bind(t, cpu);
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       per_cpu(rcu_cpu_kthread_task, cpu) = t;
+       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+       return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed.  We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp = (struct rcu_node *)arg;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       for (;;) {
+               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
+               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               mask = atomic_xchg(&rnp->wakemask, 0);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+                       if ((mask & 0x1) == 0)
+                               continue;
+                       preempt_disable();
+                       t = per_cpu(rcu_cpu_kthread_task, cpu);
+                       if (!cpu_online(cpu) || t == NULL) {
+                               preempt_enable();
+                               continue;
+                       }
+                       per_cpu(rcu_cpu_has_work, cpu) = 1;
+                       sp.sched_priority = RCU_KTHREAD_PRIO;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                       preempt_enable();
+               }
+       }
+       /* NOTREACHED */
+       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+       return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question.  The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set; use -1 if there is
+ * no outgoing CPU.  If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+       cpumask_var_t cm;
+       int cpu;
+       unsigned long mask = rnp->qsmaskinit;
+
+       if (rnp->node_kthread_task == NULL)
+               return;
+       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+               return;
+       cpumask_clear(cm);
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+               if ((mask & 0x1) && cpu != outgoingcpu)
+                       cpumask_set_cpu(cpu, cm);
+       if (cpumask_weight(cm) == 0) {
+               cpumask_setall(cm);
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+                       cpumask_clear_cpu(cpu, cm);
+               WARN_ON_ONCE(cpumask_weight(cm) == 0);
+       }
+       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+       rcu_boost_kthread_setaffinity(rnp, cm);
+       free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held.  So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+                                               struct rcu_node *rnp)
+{
+       unsigned long flags;
+       int rnp_index = rnp - &rsp->node[0];
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_kthreads_spawnable ||
+           rnp->qsmaskinit == 0)
+               return 0;
+       if (rnp->node_kthread_task == NULL) {
+               t = kthread_create(rcu_node_kthread, (void *)rnp,
+                                  "rcun%d", rnp_index);
+               if (IS_ERR(t))
+                       return PTR_ERR(t);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rnp->node_kthread_task = t;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               sp.sched_priority = 99;
+               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+       }
+       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+       int cpu;
+       struct rcu_node *rnp;
+
+       rcu_kthreads_spawnable = 1;
+       for_each_possible_cpu(cpu) {
+               per_cpu(rcu_cpu_has_work, cpu) = 0;
+               if (cpu_online(cpu))
+                       (void)rcu_spawn_one_cpu_kthread(cpu);
+       }
+       rnp = rcu_get_root(rcu_state);
+       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       if (NUM_RCU_NODES > 1) {
+               rcu_for_each_leaf_node(rcu_state, rnp)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
+       return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;
+
+       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+       if (rcu_kthreads_spawnable) {
+               (void)rcu_spawn_one_cpu_kthread(cpu);
+               if (rnp->node_kthread_task == NULL)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                         cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
 {
+       WARN_ON_ONCE(1);
 }
 
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp,
-                                                int rnp_index)
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
-       return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
 
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
 
        /* If RCU callbacks are still pending, RCU still needs this CPU. */
        if (c)
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
        return c;
 }
 
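rcu_node_kthread_setaffinity(), now compiled only under CONFIG_RCU_BOOST, turns the node's qsmaskinit bitmask into a cpumask that excludes the outgoing CPU and falls back to the CPUs outside the node when nothing is left. A standalone C sketch of that computation on a plain 64-bit mask, with hypothetical parameter names, is shown below.

/* Illustrative only: the affinity computation done by
 * rcu_node_kthread_setaffinity(), redone on a uint64_t instead of a cpumask. */
#include <stdio.h>
#include <stdint.h>

/* Build the allowed-CPU mask for a node kthread: every CPU the node covers
 * (bits of qsmaskinit, starting at grplo) except the outgoing one; if that
 * leaves nothing, fall back to the CPUs outside the node, mirroring the
 * cpumask_setall() plus per-node clears in the original. */
static uint64_t node_kthread_affinity(uint64_t qsmaskinit, int grplo, int grphi,
                                      int outgoingcpu, int nr_cpus)
{
    uint64_t cm = 0;
    uint64_t mask = qsmaskinit;
    int cpu;

    for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
        if ((mask & 0x1) && cpu != outgoingcpu)
            cm |= 1ULL << cpu;

    if (cm == 0) {                       /* nothing left inside the node */
        cm = (nr_cpus >= 64) ? ~0ULL : (1ULL << nr_cpus) - 1;
        for (cpu = grplo; cpu <= grphi; cpu++)
            cm &= ~(1ULL << cpu);        /* allow only CPUs outside the node */
    }
    return cm;
}

int main(void)
{
    /* Node covers CPUs 4..7, CPU 6 is going offline: CPUs 4, 5 and 7 remain. */
    printf("affinity = %#llx\n",
           (unsigned long long)node_kthread_affinity(0xf, 4, 7, 6, 8));
    /* Only CPU 4 was in the node and it is the one leaving: fall back to
     * the CPUs outside the node (0..3 here). */
    printf("affinity = %#llx\n",
           (unsigned long long)node_kthread_affinity(0x1, 4, 7, 4, 8));
    return 0;
}
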
index 9678cc3650f5e9c8f4e3eb409d26e9c4e784e7d5..4e144876dc68208b89931518c2d64d1433def061 100644 (file)
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+#ifdef CONFIG_RCU_BOOST
+
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
        return "SRWOY"[kthread_status];
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
        if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
+       seq_printf(m, " ql=%ld qs=%c%c%c%c",
                   rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, " kt=%d/%c/%d ktl=%x",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
                                          rdp->cpu)),
                   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
-                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
-                  rdp->blimit);
+                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, " b=%ld", rdp->blimit);
        seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
+       seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
                   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, ",%d,\"%c\"",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
-                                         rdp->cpu)),
-                  rdp->blimit);
+                                         rdp->cpu)));
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, ",%ld", rdp->blimit);
        seq_printf(m, ",%lu,%lu,%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 #ifdef CONFIG_NO_HZ
        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
-       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
+       seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+#ifdef CONFIG_RCU_BOOST
+       seq_puts(m, ",\"kt\",\"ktl\"");
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
        seq_puts(m, "\"rcu_preempt:\"\n");
        PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
index cbb3a0eee58eb2c5c6748b949fca5579bbb57432..3f2e502d609bb7957e10b4ec6aa647c9b3069c6e 100644 (file)
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
        struct cgroup_subsys_state *css;
 
        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-                       lockdep_is_held(&p->pi_lock));
+                       lockdep_is_held(&p->pi_lock) ||
+                       lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);
 
        return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+       /*
+        * The caller should hold either p->pi_lock or rq->lock, when changing
+        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+        *
+        * sched_move_task() holds both and thus holding either pins the cgroup,
+        * see set_task_rq().
+        *
+        * Furthermore, all task_rq users should acquire both locks, see
+        * task_rq_lock().
+        */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
 #endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
                }
                rcu_read_unlock();
        }
+
+       if (wake_flags & WF_MIGRATED)
+               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
        schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-       if (cpu != task_cpu(p))
-               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+               sched_clock_cpu(cpu); /* sync clocks x-cpu */
                ttwu_queue_remote(p, cpu);
                return;
        }
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                p->sched_class->task_waking(p);
 
        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (task_cpu(p) != cpu)
+       if (task_cpu(p) != cpu) {
+               wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
+       }
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu);
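The scheduler hunks record the wakeup migration at the point where the target CPU is chosen, via a new WF_MIGRATED wake flag, instead of comparing CPUs later in ttwu_stat(), by which time task_cpu(p) already holds the new value. A small illustrative C model of that ordering follows; WF_MIGRATED's value and every other name here are stand-ins, not the kernel's definitions.

/* Illustrative only: why the stat is driven by a flag set at decision time
 * rather than by a CPU comparison made after the task has already moved. */
#include <stdio.h>

#define WF_MIGRATED 0x4            /* hypothetical value for this sketch */

struct fake_task { int cpu; };

static int select_cpu(const struct fake_task *p)
{
    return p->cpu + 1;             /* pretend the balancer picked another CPU */
}

static void wake(struct fake_task *p, int *wake_flags)
{
    int cpu = select_cpu(p);

    if (p->cpu != cpu) {
        *wake_flags |= WF_MIGRATED;  /* decision recorded here ... */
        p->cpu = cpu;                /* ... before the CPU field is updated */
    }
}

int main(void)
{
    struct fake_task t = { .cpu = 2 };
    int flags = 0;

    wake(&t, &flags);
    /* After the wakeup, t.cpu already equals the selected CPU, so a late
     * "cpu != task_cpu(p)" test could no longer see the migration; the
     * flag still can, which is what the nr_wakeups_migrate counter needs. */
    printf("migrated=%d, now on cpu %d\n", !!(flags & WF_MIGRATED), t.cpu);
    return 0;
}
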
index 88725c939e0b8000253905332db8725507781f48..10d018212bab8b1ad2f53e1ed388a4bbd56d4656 100644 (file)
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
-       if (p->prio == rq->curr->prio && !need_resched())
+       if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
 #endif
 }
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task)
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
 
+       /* Make sure the mask is initialized first */
+       if (unlikely(!lowest_mask))
+               return -1;
+
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
index 86c32b884f8efbb8f071b6343b3c37667b4e8fa3..ff7678603328b3ba5e00c74dc9fd08bcbf113987 100644 (file)
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 /**
  *  sys_rt_sigprocmask - change the list of currently blocked signals
  *  @how: whether to add, remove, or set signals
- *  @set: stores pending signals
+ *  @nset: stores pending signals
  *  @oset: previous value of signal mask if non-null
  *  @sigsetsize: size of sigset_t type
  */
index 73a1951935581f48915d77e8409a3049b78de8df..fb67dfa8394edc70174a51fd93b72965a3929b71 100644 (file)
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
 };
 
-static int __cpuinit init_call_single_data(void)
+void __init call_function_init(void)
 {
        void *cpu = (void *)(long)smp_processor_id();
        int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
 
        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
-
-       return 0;
 }
-early_initcall(init_call_single_data);
 
 /*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
index 13960170cad4d91f0ec00dba8e0132d64e7c68fd..40cf63ddd4b3d740d2620ddbf1fa245830b1d703 100644 (file)
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
-       "TASKLET", "SCHED", "HRTIMER"
+       "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
 /*
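The softirq hunk only appends "RCU" to softirq_to_name[] so the table keeps matching the softirq enum after RCU_SOFTIRQ is reintroduced. A tiny self-contained C sketch of that invariant, using a compile-time size check and invented names, is given below.

/* Illustrative only: keeping a name table in lock-step with its enum, the
 * property the softirq_to_name[] hunk preserves when RCU_SOFTIRQ is added. */
#include <stdio.h>

enum fake_softirq { HI, TIMER, NET_TX, NET_RX, BLOCK, BLOCK_IOPOLL,
                    TASKLET, SCHED, HRTIMER, RCU, NR_FAKE_SOFTIRQS };

static const char *const fake_name[] = {
    "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
    "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/* Fails to compile if the table and the enum ever drift apart. */
typedef char name_table_matches_enum[
    (sizeof(fake_name) / sizeof(fake_name[0]) == NR_FAKE_SOFTIRQS) ? 1 : -1];

int main(void)
{
    printf("softirq %d is %s\n", RCU, fake_name[RCU]);
    return 0;
}
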
index c027d4f602f18276f73694a593cefb6ea77bc3a0..e4c699dfa4e8776ee3ed9e671922e962002b6a06 100644 (file)
@@ -182,7 +182,10 @@ void clockevents_register_device(struct clock_event_device *dev)
        unsigned long flags;
 
        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-       BUG_ON(!dev->cpumask);
+       if (!dev->cpumask) {
+               WARN_ON(num_possible_cpus() > 1);
+               dev->cpumask = cpumask_of(smp_processor_id());
+       }
 
        raw_spin_lock_irqsave(&clockevents_lock, flags);
 
index 1c95fd677328e4fe3d0e582fc62284d9ec150a68..e0980f0d9a0ad2d559b98c12f26317b55044cad1 100644 (file)
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
 static int watchdog_running;
 
 static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
        if (!watchdog_running)
                goto out;
 
-       wdnow = watchdog->read(watchdog);
-       wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
-                                    watchdog->mult, watchdog->shift);
-       watchdog_last = wdnow;
-
        list_for_each_entry(cs, &watchdog_list, wd_list) {
 
                /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
                }
 
+               local_irq_disable();
                csnow = cs->read(cs);
+               wdnow = watchdog->read(watchdog);
+               local_irq_enable();
 
                /* Clocksource initialized ? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                        cs->flags |= CLOCK_SOURCE_WATCHDOG;
-                       cs->wd_last = csnow;
+                       cs->wd_last = wdnow;
+                       cs->cs_last = csnow;
                        continue;
                }
 
-               /* Check the deviation from the watchdog clocksource. */
-               cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+               wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+                                            watchdog->mult, watchdog->shift);
+
+               cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
                                             cs->mask, cs->mult, cs->shift);
-               cs->wd_last = csnow;
+               cs->cs_last = csnow;
+               cs->wd_last = wdnow;
+
+               /* Check the deviation from the watchdog clocksource. */
                if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        clocksource_unstable(cs, cs_nsec - wd_nsec);
                        continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
                return;
        init_timer(&watchdog_timer);
        watchdog_timer.function = clocksource_watchdog;
-       watchdog_last = watchdog->read(watchdog);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
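The clocksource watchdog now reads each clocksource and the watchdog back to back with interrupts disabled and keeps a per-clocksource pair of last values (cs_last, wd_last), so both deltas cover the same interval even if the watchdog timer was delayed. A standalone C sketch of the wraparound-safe delta and the cyc2ns-style conversion is shown below; the mult/shift/mask values are made up for the example.

/* Illustrative only: the wraparound-safe delta and cycles-to-ns conversion
 * the reworked watchdog performs per clocksource. */
#include <stdio.h>
#include <stdint.h>

/* Same formula as clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
    return (cycles * mult) >> shift;
}

/* Delta between two reads of a counter that wraps at 'mask'. */
static uint64_t delta(uint64_t now, uint64_t last, uint64_t mask)
{
    return (now - last) & mask;
}

int main(void)
{
    /* A 16-bit counter that wrapped between the two reads. */
    uint64_t mask = 0xffff, last = 0xfff0, now = 0x0010;
    /* mult/shift chosen so one cycle is about 1000 ns (1 kHz counter). */
    uint32_t mult = 1000, shift = 0;

    printf("elapsed cycles = %llu\n",
           (unsigned long long)delta(now, last, mask));            /* 32 */
    printf("elapsed ns     = %llu\n",
           (unsigned long long)cyc2ns(delta(now, last, mask), mult, shift));
    return 0;
}
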
index fd6198692b57b16e47132d1eb5183bafd98b050e..8cff36119e4d50f7336a06e8d01e59e8b162e694 100644 (file)
@@ -749,16 +749,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
        unsigned long expires_limit, mask;
        int bit;
 
-       expires_limit = expires;
-
        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
-               unsigned long now = jiffies;
+               long delta = expires - jiffies;
+
+               if (delta < 256)
+                       return expires;
 
-               /* No slack, if already expired else auto slack 0.4% */
-               if (time_after(expires, now))
-                       expires_limit = expires + (expires - now)/256;
+               expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
@@ -795,6 +794,8 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
+       expires = apply_slack(timer, expires);
+
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
@@ -803,8 +804,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
        if (timer_pending(timer) && timer->expires == expires)
                return 1;
 
-       expires = apply_slack(timer, expires);
-
        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
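apply_slack() is moved ahead of the "unchanged expiry" short-circuit in mod_timer(), and the automatic slack becomes a straight delta/256 (roughly 0.4%) that is skipped for expiries less than 256 jiffies away. A self-contained C sketch of that rounding, under the assumption that find_last_bit() is simply the index of the highest set bit, follows.

/* Illustrative only: the automatic slack rounding that mod_timer() now
 * applies up front so nearby timers can coalesce. */
#include <stdio.h>

static unsigned long apply_auto_slack(unsigned long expires, unsigned long now)
{
    unsigned long delta = expires - now;
    unsigned long limit, mask;
    int bit;

    if ((long)delta < 256)          /* too close (or already past): no slack */
        return expires;

    limit = expires + delta / 256;  /* about 0.4% of the remaining time */
    mask = expires ^ limit;         /* bits that differ after adding slack */
    if (mask == 0)
        return expires;
    for (bit = 0; mask >> (bit + 1); bit++)
        ;                           /* index of the highest differing bit */
    mask = (1UL << bit) - 1;
    return limit & ~mask;           /* round down below that bit */
}

int main(void)
{
    unsigned long now = 100000;

    printf("%lu -> %lu\n", now + 10000, apply_auto_slack(now + 10000, now));
    printf("%lu -> %lu\n", now + 100,   apply_auto_slack(now + 100,   now));
    return 0;
}
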
index 28afa4c5333c121e8a9feac719892fb17e5ef9a0..dd373c8ee94399efad67ad5bc3adf57e51cf4a3d 100644 (file)
@@ -697,7 +697,7 @@ config DEBUG_BUGVERBOSE
        bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
        depends on BUG
        depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
-                  FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
+                  FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
        default y
        help
          Say Y here to make BUG() panics output the file name and line number
index 41baf02924e64cc926ef3b2560a8f02c76c1bbf0..3f3b68199d744bd4fc9b94dcb9ca6e831502b0f2 100644 (file)
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
 
 /**
  * __bitmap_parselist - convert list format ASCII string to bitmap
- * @bp: read nul-terminated user string from this buffer
+ * @buf: read nul-terminated user string from this buffer
  * @buflen: buffer size in bytes.  If string is smaller than this
  *    then it must be terminated with a \0.
  * @is_user: location of buffer, 0 indicates kernel space
index 82dc34c095c25ea0e14619d68bda9d3942836bd5..640bd98a4c8aa59fd3f597da6f0706287de5609c 100644 (file)
@@ -948,14 +948,14 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 }
 
 
-const void *kobj_ns_current(enum kobj_ns_type type)
+void *kobj_ns_grab_current(enum kobj_ns_type type)
 {
-       const void *ns = NULL;
+       void *ns = NULL;
 
        spin_lock(&kobj_ns_type_lock);
        if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
            kobj_ns_ops_tbl[type])
-               ns = kobj_ns_ops_tbl[type]->current_ns();
+               ns = kobj_ns_ops_tbl[type]->grab_current_ns();
        spin_unlock(&kobj_ns_type_lock);
 
        return ns;
@@ -987,23 +987,15 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
        return ns;
 }
 
-/*
- * kobj_ns_exit - invalidate a namespace tag
- *
- * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
- * @ns: the actual namespace being invalidated
- *
- * This is called when a tag is no longer valid.  For instance,
- * when a network namespace exits, it uses this helper to
- * make sure no sb's sysfs_info points to the now-invalidated
- * netns.
- */
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
+void kobj_ns_drop(enum kobj_ns_type type, void *ns)
 {
-       sysfs_exit_ns(type, ns);
+       spin_lock(&kobj_ns_type_lock);
+       if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+           kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
+               kobj_ns_ops_tbl[type]->drop_ns(ns);
+       spin_unlock(&kobj_ns_type_lock);
 }
 
-
 EXPORT_SYMBOL(kobject_get);
 EXPORT_SYMBOL(kobject_put);
 EXPORT_SYMBOL(kobject_del);
index 93ca08b8a451411c3c4ac18f1f9525637e4a7140..99093b396145957d52b5e88a183ee1eb9cedeb60 100644 (file)
@@ -110,6 +110,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+unsigned long swioltb_nr_tbl(void)
+{
+       return io_tlb_nslabs;
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
index c11205688fb4d4ff070409557b633cf05c2d7c07..4365df31a1d52122cd670d5ad2fe321fc887337f 100644 (file)
@@ -666,6 +666,8 @@ char *ip6_compressed_string(char *p, const char *addr)
                        colonpos = i;
                }
        }
+       if (longest == 1)               /* don't compress a single 0 */
+               colonpos = -1;
 
        /* emit address */
        for (i = 0; i < range; i++) {
@@ -826,7 +828,7 @@ int kptr_restrict __read_mostly;
  *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
  * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
  * - 'I6c' for IPv6 addresses printed as specified by
- *       http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
+ *       http://tools.ietf.org/html/rfc5952
  * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
  *       "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
  *       Options for %pU are:
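The vsprintf change enforces the RFC 5952 rule that "::" may only replace two or more consecutive zero groups, never a single one, by discarding a longest run of length 1. A standalone C sketch of that decision on an array of eight hextets is shown below; the function name is hypothetical.

/* Illustrative only: choosing which run of zero groups, if any, to compress
 * in an IPv6 address, per RFC 5952. */
#include <stdio.h>
#include <stdint.h>

/* Return the start index of the run to compress, or -1 for no compression. */
static int zero_run_to_compress(const uint16_t hextet[8])
{
    int best_len = 0, best_pos = -1;
    int len = 0, pos = -1;

    for (int i = 0; i < 8; i++) {
        if (hextet[i] == 0) {
            if (len++ == 0)
                pos = i;
            if (len > best_len) {
                best_len = len;
                best_pos = pos;
            }
        } else {
            len = 0;
        }
    }
    return (best_len > 1) ? best_pos : -1;   /* don't compress a single 0 */
}

int main(void)
{
    uint16_t a[8] = { 0x2001, 0xdb8, 0, 0, 0, 0, 0, 1 };   /* 2001:db8::1 */
    uint16_t b[8] = { 0x2001, 0xdb8, 0, 1, 1, 1, 1, 1 };   /* lone zero group */

    printf("compress at %d\n", zero_run_to_compress(a));    /* 2 */
    printf("compress at %d\n", zero_run_to_compress(b));    /* -1 */
    return 0;
}
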
index 021a2960ef9e18d061c972590f7651a35e7652ce..6cc604bd56496e2742e371d63234af08f9f0859b 100644 (file)
@@ -144,9 +144,20 @@ static void isolate_freepages(struct zone *zone,
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
+       /*
+        * Initialise the free scanner. The starting point is where we last
+        * scanned from (or the end of the zone if starting). The low point
+        * is the end of the pageblock the migration scanner is using.
+        */
        pfn = cc->free_pfn;
        low_pfn = cc->migrate_pfn + pageblock_nr_pages;
-       high_pfn = low_pfn;
+
+       /*
+        * Take care that if the migration scanner is at the end of the zone,
+        * the free scanner does not accidentally move to the next zone
+        * in the next isolation cycle.
+        */
+       high_pfn = min(low_pfn, pfn);
 
        /*
         * Isolate free pages until enough are available to migrate the
@@ -240,11 +251,18 @@ static bool too_many_isolated(struct zone *zone)
        return isolated > (inactive + active) / 2;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+       ISOLATE_ABORT,          /* Abort compaction now */
+       ISOLATE_NONE,           /* No pages isolated, continue scanning */
+       ISOLATE_SUCCESS,        /* Pages isolated, migrate */
+} isolate_migrate_t;
+
 /*
  * Isolate all pages that can be migrated from the block pointed to by
  * the migrate scanner within compact_control.
  */
-static unsigned long isolate_migratepages(struct zone *zone,
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
 {
        unsigned long low_pfn, end_pfn;
@@ -261,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
        /* Do not cross the free scanner or scan within a memory hole */
        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
                cc->migrate_pfn = end_pfn;
-               return 0;
+               return ISOLATE_NONE;
        }
 
        /*
@@ -270,10 +288,14 @@ static unsigned long isolate_migratepages(struct zone *zone,
         * delay for some time until fewer pages are isolated
         */
        while (unlikely(too_many_isolated(zone))) {
+               /* async migration should just abort */
+               if (!cc->sync)
+                       return ISOLATE_ABORT;
+
                congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                if (fatal_signal_pending(current))
-                       return 0;
+                       return ISOLATE_ABORT;
        }
 
        /* Time to isolate some pages for migration */
@@ -358,7 +380,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-       return cc->nr_migratepages;
+       return ISOLATE_SUCCESS;
 }
 
 /*
@@ -420,13 +442,6 @@ static int compact_finished(struct zone *zone,
        if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;
 
-       /* Compaction run is not finished if the watermark is not met */
-       watermark = low_wmark_pages(zone);
-       watermark += (1 << cc->order);
-
-       if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
-               return COMPACT_CONTINUE;
-
        /*
         * order == -1 is expected when compacting via
         * /proc/sys/vm/compact_memory
@@ -434,6 +449,13 @@ static int compact_finished(struct zone *zone,
        if (cc->order == -1)
                return COMPACT_CONTINUE;
 
+       /* Compaction run is not finished if the watermark is not met */
+       watermark = low_wmark_pages(zone);
+       watermark += (1 << cc->order);
+
+       if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+               return COMPACT_CONTINUE;
+
        /* Direct compactor: Is a suitable page free? */
        for (order = cc->order; order < MAX_ORDER; order++) {
                /* Job done if page is free of the right migratetype */
@@ -460,6 +482,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
        int fragindex;
        unsigned long watermark;
 
+       /*
+        * order == -1 is expected when compacting via
+        * /proc/sys/vm/compact_memory
+        */
+       if (order == -1)
+               return COMPACT_CONTINUE;
+
        /*
         * Watermarks for order-0 must be met for compaction. Note the 2UL.
         * This is because during migration, copies of pages need to be
@@ -469,18 +498,12 @@ unsigned long compaction_suitable(struct zone *zone, int order)
        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                return COMPACT_SKIPPED;
 
-       /*
-        * order == -1 is expected when compacting via
-        * /proc/sys/vm/compact_memory
-        */
-       if (order == -1)
-               return COMPACT_CONTINUE;
-
        /*
         * fragmentation index determines if allocation failures are due to
         * low memory or external fragmentation
         *
-        * index of -1 implies allocations might succeed dependingon watermarks
+        * index of -1000 implies allocations might succeed depending on
+        * watermarks
         * index towards 0 implies failure is due to lack of memory
         * index towards 1000 implies failure is due to fragmentation
         *
@@ -490,7 +513,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
                return COMPACT_SKIPPED;
 
-       if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
+       if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
+           0, 0))
                return COMPACT_PARTIAL;
 
        return COMPACT_CONTINUE;
@@ -522,8 +546,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                unsigned long nr_migrate, nr_remaining;
                int err;
 
-               if (!isolate_migratepages(zone, cc))
+               switch (isolate_migratepages(zone, cc)) {
+               case ISOLATE_ABORT:
+                       ret = COMPACT_PARTIAL;
+                       goto out;
+               case ISOLATE_NONE:
                        continue;
+               case ISOLATE_SUCCESS:
+                       ;
+               }
 
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
@@ -547,6 +578,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
        }
 
+out:
        /* Release free pages and check accounting */
        cc->nr_freepages -= release_freepages(&cc->freepages);
        VM_BUG_ON(cc->nr_freepages != 0);
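
The compaction hunks above replace the old "number of pages isolated" return value with an explicit tri-state result, so an async compaction pass can bail out early (ISOLATE_ABORT mapping to COMPACT_PARTIAL) without being confused with "nothing to isolate". Below is a minimal userspace C sketch of that tri-state pattern; only the ISOLATE_* names come from the patch, the fake work loop and abort condition are invented for illustration.

#include <stdio.h>

enum isolate_result {
	ISOLATE_ABORT,		/* caller should stop compaction early */
	ISOLATE_NONE,		/* nothing isolated, try the next block */
	ISOLATE_SUCCESS,	/* pages isolated, proceed with migration */
};

static enum isolate_result isolate_step(int step, int async)
{
	if (async && step == 2)		/* pretend "too many isolated" */
		return ISOLATE_ABORT;
	if (step % 2)			/* pretend the block was unsuitable */
		return ISOLATE_NONE;
	return ISOLATE_SUCCESS;
}

int main(void)
{
	for (int step = 0; step < 5; step++) {
		switch (isolate_step(step, 1)) {
		case ISOLATE_ABORT:
			puts("abort: returning COMPACT_PARTIAL");
			return 0;
		case ISOLATE_NONE:
			continue;	/* same effect as the kernel loop */
		case ISOLATE_SUCCESS:
			printf("step %d: migrate isolated pages\n", step);
			break;
		}
	}
	return 0;
}
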
index d7b10578a64ba39b2862464f71d6439609c6fa22..a8251a8d3457d28802164d6538c9226e98416f73 100644 (file)
@@ -2000,7 +2000,7 @@ int file_remove_suid(struct file *file)
                error = security_inode_killpriv(dentry);
        if (!error && killsuid)
                error = __remove_suid(dentry, killsuid);
-       if (!error)
+       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
                inode->i_flags |= S_NOSEC;
 
        return error;
index 615d9743a3cbad1ca19d745c45115eecb1c463f4..81532f297fd22cd11e1c7a4161c8e320a7015ec2 100644 (file)
@@ -2234,11 +2234,8 @@ static void khugepaged_loop(void)
        while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage)) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+               if (unlikely(!hpage))
                        break;
-               }
-               count_vm_event(THP_COLLAPSE_ALLOC);
 #else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();
index f33bb319b73f093ce38e828a265b24d9c18e0525..bfcf153bc82907f31ccc1921256622a82bf20d2f 100644 (file)
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(chg);
+               return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
-                       return ERR_PTR(-ENOSPC);
+                       return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -1111,6 +1111,14 @@ static void __init gather_bootmem_prealloc(void)
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and other
+                * side effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
 
index d708b3ef2260282a3d6e5784a60c1fb003339f8e..9a68b0cf0a1c4c8009ee25d2990530d7e2927132 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1302,6 +1302,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
                slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
                ksm_scan.mm_slot = slot;
                spin_unlock(&ksm_mmlist_lock);
+               /*
+                * Although we tested list_empty() above, a racing __ksm_exit
+                * of the last mm on the list may have removed it since then.
+                */
+               if (slot == &ksm_mm_head)
+                       return NULL;
 next_mm:
                ksm_scan.address = 0;
                ksm_scan.rmap_list = &slot->rmap_list;
index bd9052a5d3ad74aa2db023c953f0b498e5ab7dd0..cf7d027a8844b115bcc6d213264707d220c6c3d9 100644 (file)
@@ -359,7 +359,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
-static void drain_all_stock_async(void);
+static void drain_all_stock_async(struct mem_cgroup *mem);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +735,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
                                struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
        struct mem_cgroup *mem = NULL;
 
@@ -1663,15 +1663,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
-       if (root_mem->memsw_is_minimum)
+       if (!check_soft && root_mem->memsw_is_minimum)
                noswap = true;
 
        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
                        loop++;
-                       if (loop >= 1)
-                               drain_all_stock_async();
+                       /*
+                        * We do not drain per-cpu cached charges during
+                        * soft limit reclaim because global reclaim only
+                        * cares about freeing memory; draining charges
+                        * would not free any.
+                        */
+                       if (!check_soft && loop >= 1)
+                               drain_all_stock_async(root_mem);
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
@@ -1934,9 +1940,11 @@ struct memcg_stock_pcp {
        struct mem_cgroup *cached; /* this never be root cgroup */
        unsigned int nr_pages;
        struct work_struct work;
+       unsigned long flags;
+#define FLUSHING_CACHED_CHARGE (0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static atomic_t memcg_drain_count;
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1992,7 @@ static void drain_local_stock(struct work_struct *dummy)
 {
        struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
        drain_stock(stock);
+       clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
 /*
@@ -2008,26 +2017,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  * expects some charges will be back to res_counter later but cannot wait for
  * it.
  */
-static void drain_all_stock_async(void)
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
-       int cpu;
-       /* This function is for scheduling "drain" in asynchronous way.
-        * The result of "drain" is not directly handled by callers. Then,
-        * if someone is calling drain, we don't have to call drain more.
-        * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
-        * there is a race. We just do loose check here.
+       int cpu, curcpu;
+       /*
+        * If someone is already draining, avoid adding more kworker runs.
         */
-       if (atomic_read(&memcg_drain_count))
+       if (!mutex_trylock(&percpu_charge_mutex))
                return;
        /* Notify other cpus that system-wide "drain" is running */
-       atomic_inc(&memcg_drain_count);
        get_online_cpus();
+       /*
+        * Get a hint for avoiding draining charges on the current cpu,
+        * which must be exhausted by our charging.  It is not required that
+        * this be a precise check, so we use raw_smp_processor_id() instead of
+        * get_cpu()/put_cpu().
+        */
+       curcpu = raw_smp_processor_id();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-               schedule_work_on(cpu, &stock->work);
+               struct mem_cgroup *mem;
+
+               if (cpu == curcpu)
+                       continue;
+
+               mem = stock->cached;
+               if (!mem)
+                       continue;
+               if (mem != root_mem) {
+                       if (!root_mem->use_hierarchy)
+                               continue;
+                       /* check whether "mem" is under tree of "root_mem" */
+                       if (!css_is_ancestor(&mem->css, &root_mem->css))
+                               continue;
+               }
+               if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+                       schedule_work_on(cpu, &stock->work);
        }
        put_online_cpus();
-       atomic_dec(&memcg_drain_count);
+       mutex_unlock(&percpu_charge_mutex);
        /* We don't wait for flush_work */
 }
 
@@ -2035,9 +2063,9 @@ static void drain_all_stock_async(void)
 static void drain_all_stock_sync(void)
 {
        /* called when force_empty is called */
-       atomic_inc(&memcg_drain_count);
+       mutex_lock(&percpu_charge_mutex);
        schedule_on_each_cpu(drain_local_stock);
-       atomic_dec(&memcg_drain_count);
+       mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
@@ -4640,6 +4668,7 @@ static struct cftype mem_cgroup_files[] = {
        {
                .name = "numa_stat",
                .open = mem_control_numa_stat_open,
+               .mode = S_IRUGO,
        },
 #endif
 };
@@ -5414,18 +5443,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *old_cont,
                                struct task_struct *p)
 {
-       struct mm_struct *mm;
+       struct mm_struct *mm = get_task_mm(p);
 
-       if (!mc.to)
-               /* no need to move charge */
-               return;
-
-       mm = get_task_mm(p);
        if (mm) {
-               mem_cgroup_move_charge(mm);
+               if (mc.to)
+                       mem_cgroup_move_charge(mm);
+               put_swap_token(mm);
                mmput(mm);
        }
-       mem_cgroup_clear_mc();
+       if (mc.to)
+               mem_cgroup_clear_mc();
 }
 #else  /* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
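
The memcontrol.c hunks above serialize whole drain rounds with mutex_trylock() on percpu_charge_mutex and add a per-cpu FLUSHING_CACHED_CHARGE bit, set with test_and_set_bit() and cleared by the drain worker, so one CPU's drain work is never queued twice. A rough userspace analogue of that scheduling logic, with invented names and a fake work queue, might look like this (a sketch, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct stock {
	atomic_bool flushing;	/* plays the role of FLUSHING_CACHED_CHARGE */
	int cached;		/* non-zero: this CPU has something to drain */
};

static struct stock stocks[NR_CPUS];
static pthread_mutex_t charge_mutex = PTHREAD_MUTEX_INITIALIZER;

static void queue_drain(int cpu)
{
	printf("queue drain work on cpu %d\n", cpu);
}

static void drain_all_async(int curcpu)
{
	/* If a drain round is already running, do not start another. */
	if (pthread_mutex_trylock(&charge_mutex) != 0)
		return;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == curcpu || !stocks[cpu].cached)
			continue;
		/* Only queue work if no drain is already pending there. */
		if (!atomic_exchange(&stocks[cpu].flushing, true))
			queue_drain(cpu);
	}
	pthread_mutex_unlock(&charge_mutex);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		stocks[cpu].cached = cpu & 1;
	drain_all_async(0);
	/*
	 * In the kernel the worker clears the flag when it finishes; here
	 * nothing clears it, so a second round queues no new work.
	 */
	drain_all_async(0);
	return 0;
}
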
index 5c8f7e08928d5faba209a0392b4b2befe17a67e3..eac0ba5614912e57c7b83b22382bc1165138ef2a 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm_inline.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1468,7 +1469,8 @@ int soft_offline_page(struct page *page, int flags)
        put_page(page);
        if (!ret) {
                LIST_HEAD(pagelist);
-
+               inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                                                0, true);
index 6953d3926e01e8ad1370304064ec2a0e94ad1ebb..87d935333f0dda3d477c5cacd1219b78e7b910e7 100644 (file)
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        int force_flush = 0;
        int rss[NR_MM_COUNTERS];
        spinlock_t *ptl;
+       pte_t *start_pte;
        pte_t *pte;
 
 again:
        init_rss_vec(rss);
-       pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       pte = start_pte;
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
 
        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(pte - 1, ptl);
+       pte_unmap_unlock(start_pte, ptl);
 
        /*
         * mmu_gather ran out of room to batch pages, we break out of
@@ -1296,7 +1298,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
- * @tlbp: address of the caller's struct mmu_gather
+ * @tlb: address of the caller's struct mmu_gather
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
index 9f646374e32f5ec7704b86ad653c2794037b830a..02159c755136ea0463f57a2a951f9cad223aceb1 100644 (file)
@@ -494,6 +494,12 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
 
+       /*
+        * The node we allocated has no zone fallback lists. To avoid
+        * accessing an uninitialized zonelist, build them here.
+        */
+       build_all_zonelists(NULL);
+
        return pgdat;
 }
 
index e4a5c912983df5352d701bce82fb691160c2e657..666e4e677414e6d790de715761e395116441e6c4 100644 (file)
@@ -288,7 +288,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
-       if (PageSwapBacked(page)) {
+       if (!PageSwapCache(page) && PageSwapBacked(page)) {
                __dec_zone_page_state(page, NR_SHMEM);
                __inc_zone_page_state(newpage, NR_SHMEM);
        }
index bbdc9af5e1177108894d881126761fa71ef9cada..d49736ff8a8dad10420a4f2f76ca89da52c7da89 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -906,14 +906,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
        if (anon_vma)
                return anon_vma;
 try_prev:
-       /*
-        * It is potentially slow to have to call find_vma_prev here.
-        * But it's only on the first write fault on the vma, not
-        * every time, and we could devise a way to avoid it later
-        * (e.g. stash info in next's anon_vma_node when assigning
-        * an anon_vma, or when trying vma_merge).  Another time.
-        */
-       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+       near = vma->vm_prev;
        if (!near)
                goto none;
 
@@ -2044,9 +2037,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                return -EINVAL;
 
        /* Find the first overlapping VMA */
-       vma = find_vma_prev(mm, start, &prev);
+       vma = find_vma(mm, start);
        if (!vma)
                return 0;
+       prev = vma->vm_prev;
        /* we have  start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
index 74ccff61d1bea8d6e258170280ea5b20523dad23..53bffc6c293eb4fb7aa2a00be5a8712946dab5c9 100644 (file)
@@ -162,13 +162,13 @@ static void free_page_cgroup(void *addr)
 }
 #endif
 
-static int __meminit init_section_page_cgroup(unsigned long pfn)
+static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
 {
        struct page_cgroup *base, *pc;
        struct mem_section *section;
        unsigned long table_size;
        unsigned long nr;
-       int nid, index;
+       int index;
 
        nr = pfn_to_section_nr(pfn);
        section = __nr_to_section(nr);
@@ -176,7 +176,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
        if (section->page_cgroup)
                return 0;
 
-       nid = page_to_nid(pfn_to_page(pfn));
        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
        base = alloc_page_cgroup(table_size, nid);
 
@@ -196,7 +195,11 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
                pc = base + index;
                init_page_cgroup(pc, nr);
        }
-
+       /*
+        * The passed "pfn" may not be aligned to SECTION.  For the calculation
+        * we need to apply a mask.
+        */
+       pfn &= PAGE_SECTION_MASK;
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
@@ -225,10 +228,20 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
+       if (nid == -1) {
+               /*
+                * In this case, "nid" already exists and contains valid memory.
+                * "start_pfn" passed to us is a pfn which is an arg for
+                * online_pages(), and start_pfn should exist.
+                */
+               nid = pfn_to_nid(start_pfn);
+               VM_BUG_ON(!node_state(nid, N_ONLINE));
+       }
+
        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
-               fail = init_section_page_cgroup(pfn);
+               fail = init_section_page_cgroup(pfn, nid);
        }
        if (!fail)
                return 0;
@@ -284,25 +297,47 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
 void __init page_cgroup_init(void)
 {
        unsigned long pfn;
-       int fail = 0;
+       int nid;
 
        if (mem_cgroup_disabled())
                return;
 
-       for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
-               if (!pfn_present(pfn))
-                       continue;
-               fail = init_section_page_cgroup(pfn);
-       }
-       if (fail) {
-               printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
-               panic("Out of memory");
-       } else {
-               hotplug_memory_notifier(page_cgroup_callback, 0);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               unsigned long start_pfn, end_pfn;
+
+               start_pfn = node_start_pfn(nid);
+               end_pfn = node_end_pfn(nid);
+               /*
+                * start_pfn and end_pfn may not be section-aligned, and the
+                * page->flags of out-of-node pages are not initialized.  So we
+                * scan [start_pfn, the biggest section's pfn < end_pfn) here.
+                */
+               for (pfn = start_pfn;
+                    pfn < end_pfn;
+                     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
+
+                       if (!pfn_valid(pfn))
+                               continue;
+                       /*
+                        * Nodes' pfn ranges can overlap.
+                        * We know some arches can have a node layout such as
+                        * -------------pfn-------------->
+                        * N0 | N1 | N2 | N0 | N1 | N2|....
+                        */
+                       if (pfn_to_nid(pfn) != nid)
+                               continue;
+                       if (init_section_page_cgroup(pfn, nid))
+                               goto oom;
+               }
        }
+       hotplug_memory_notifier(page_cgroup_callback, 0);
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
-       printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
-       " want memory cgroups\n");
+       printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
+                        "don't want memory cgroups\n");
+       return;
+oom:
+       printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
+       panic("Out of memory");
 }
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
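
The page_cgroup.c hunks above change boot-time initialization to walk each node's pfn range one memory section at a time, skipping pfns that actually belong to another (overlapping) node. A small userspace sketch of that walk, with an invented interleaved node layout and section size, might look like:

#include <stdio.h>

#define PAGES_PER_SECTION 64UL

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Pretend nodes interleave: even sections -> node 0, odd -> node 1. */
static int pfn_to_nid(unsigned long pfn)
{
	return (pfn / PAGES_PER_SECTION) & 1;
}

int main(void)
{
	unsigned long start_pfn = 10, end_pfn = 500;
	int nid = 0;

	for (unsigned long pfn = start_pfn; pfn < end_pfn;
	     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
		if (pfn_to_nid(pfn) != nid)
			continue;
		printf("init section containing pfn %lu for node %d\n",
		       pfn & ~(PAGES_PER_SECTION - 1), nid);
	}
	return 0;
}
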
index 0eb463ea88dd71326b70342f8492df873a6b5df1..27dfd3b82b0f39cfcd38ff8b2a02c55151651f30 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -112,9 +112,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
        kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
-static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 {
-       return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+       return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 }
 
 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +159,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;
 
-               avc = anon_vma_chain_alloc();
+               avc = anon_vma_chain_alloc(GFP_KERNEL);
                if (!avc)
                        goto out_enomem;
 
@@ -200,6 +200,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
        return -ENOMEM;
 }
 
+/*
+ * This is a useful helper function for locking the anon_vma root as
+ * we traverse the vma->anon_vma_chain, looping over anon_vma's that
+ * have the same vma.
+ *
+ * Such anon_vma's should have the same root, so you'd expect to see
+ * just a single mutex_lock for the whole traversal.
+ */
+static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
+{
+       struct anon_vma *new_root = anon_vma->root;
+       if (new_root != root) {
+               if (WARN_ON_ONCE(root))
+                       mutex_unlock(&root->mutex);
+               root = new_root;
+               mutex_lock(&root->mutex);
+       }
+       return root;
+}
+
+static inline void unlock_anon_vma_root(struct anon_vma *root)
+{
+       if (root)
+               mutex_unlock(&root->mutex);
+}
+
 static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
@@ -208,13 +234,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-       anon_vma_lock(anon_vma);
        /*
         * It's critical to add new vmas to the tail of the anon_vma,
         * see comment in huge_memory.c:__split_huge_page().
         */
        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-       anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -224,13 +248,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
        struct anon_vma_chain *avc, *pavc;
+       struct anon_vma *root = NULL;
 
        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
-               avc = anon_vma_chain_alloc();
-               if (!avc)
-                       goto enomem_failure;
-               anon_vma_chain_link(dst, avc, pavc->anon_vma);
+               struct anon_vma *anon_vma;
+
+               avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+               if (unlikely(!avc)) {
+                       unlock_anon_vma_root(root);
+                       root = NULL;
+                       avc = anon_vma_chain_alloc(GFP_KERNEL);
+                       if (!avc)
+                               goto enomem_failure;
+               }
+               anon_vma = pavc->anon_vma;
+               root = lock_anon_vma_root(root, anon_vma);
+               anon_vma_chain_link(dst, avc, anon_vma);
        }
+       unlock_anon_vma_root(root);
        return 0;
 
  enomem_failure:
@@ -263,7 +298,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
-       avc = anon_vma_chain_alloc();
+       avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_error_free_anon_vma;
 
@@ -280,7 +315,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
+       anon_vma_lock(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma_unlock(anon_vma);
 
        return 0;
 
@@ -291,36 +328,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        return -ENOMEM;
 }
 
-static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
-{
-       struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
-       int empty;
-
-       /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
-       if (!anon_vma)
-               return;
-
-       anon_vma_lock(anon_vma);
-       list_del(&anon_vma_chain->same_anon_vma);
-
-       /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head);
-       anon_vma_unlock(anon_vma);
-
-       if (empty)
-               put_anon_vma(anon_vma);
-}
-
 void unlink_anon_vmas(struct vm_area_struct *vma)
 {
        struct anon_vma_chain *avc, *next;
+       struct anon_vma *root = NULL;
 
        /*
         * Unlink each anon_vma chained to the VMA.  This list is ordered
         * from newest to oldest, ensuring the root anon_vma gets freed last.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
-               anon_vma_unlink(avc);
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               root = lock_anon_vma_root(root, anon_vma);
+               list_del(&avc->same_anon_vma);
+
+               /*
+                * Leave empty anon_vmas on the list - we'll need
+                * to free them outside the lock.
+                */
+               if (list_empty(&anon_vma->head))
+                       continue;
+
+               list_del(&avc->same_vma);
+               anon_vma_chain_free(avc);
+       }
+       unlock_anon_vma_root(root);
+
+       /*
+        * Iterate the list once more; it now contains only empty and unlinked
+        * anon_vmas, so destroy them. This could not be done earlier because
+        * __put_anon_vma() needs to acquire the anon_vma->root->mutex.
+        */
+       list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               put_anon_vma(anon_vma);
+
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
index bcfa4987c8ae2c9fc273f1baf75d4461ed743052..d96e223de775378f78d7597d8c2d87a9e24e70a2 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+    void *caller)
 {
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
        kmemleak_free_recursive(objp, cachep->flags);
-       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+       objp = cache_free_debugcheck(cachep, objp, caller);
 
        kmemcheck_slab_free(cachep, objp, obj_size(cachep));
 
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        debug_check_no_locks_freed(objp, obj_size(cachep));
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, obj_size(cachep));
-       __cache_free(cachep, objp);
+       __cache_free(cachep, objp, __builtin_return_address(0));
        local_irq_restore(flags);
 
        trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
        debug_check_no_obj_freed(objp, obj_size(c));
-       __cache_free(c, (void *)objp);
+       __cache_free(c, (void *)objp, __builtin_return_address(0));
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
index 7be0223531b090dbd9f716104714bc4d9ca9c7b5..35f351f26193a47145cd8bcb7f55ae091331ca97 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
                        SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
        /*
-        * Must align to double word boundary for the double cmpxchg instructions
-        * to work.
+        * Must align to double word boundary for the double cmpxchg
+        * instructions to work; see __pcpu_double_call_return_bool().
         */
-       s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-       /* Regular alignment is sufficient */
-       s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+       s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+                                    2 * sizeof(void *));
 
        if (!s->cpu_slab)
                return 0;
index 2372d4ed5dd8ff763d3aaed5cb546c8033eca1ff..fabf2d0f51695eddacdb6b013fac0b9cdee3a655 100644 (file)
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/swap.h>
+#include <linux/memcontrol.h>
+
+#include <trace/events/vmscan.h>
+
+#define TOKEN_AGING_INTERVAL   (0xFF)
 
 static DEFINE_SPINLOCK(swap_token_lock);
 struct mm_struct *swap_token_mm;
+struct mem_cgroup *swap_token_memcg;
 static unsigned int global_faults;
+static unsigned int last_aging;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+       struct mem_cgroup *memcg;
+
+       memcg = try_get_mem_cgroup_from_mm(mm);
+       if (memcg)
+               css_put(mem_cgroup_css(memcg));
+
+       return memcg;
+}
+#else
+static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
+{
+       return NULL;
+}
+#endif
 
 void grab_swap_token(struct mm_struct *mm)
 {
        int current_interval;
+       unsigned int old_prio = mm->token_priority;
 
        global_faults++;
 
@@ -38,40 +64,81 @@ void grab_swap_token(struct mm_struct *mm)
                return;
 
        /* First come first served */
-       if (swap_token_mm == NULL) {
-               mm->token_priority = mm->token_priority + 2;
-               swap_token_mm = mm;
-               goto out;
+       if (!swap_token_mm)
+               goto replace_token;
+
+       if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
+               swap_token_mm->token_priority /= 2;
+               last_aging = global_faults;
        }
 
-       if (mm != swap_token_mm) {
-               if (current_interval < mm->last_interval)
-                       mm->token_priority++;
-               else {
-                       if (likely(mm->token_priority > 0))
-                               mm->token_priority--;
-               }
-               /* Check if we deserve the token */
-               if (mm->token_priority > swap_token_mm->token_priority) {
-                       mm->token_priority += 2;
-                       swap_token_mm = mm;
-               }
-       } else {
-               /* Token holder came in again! */
+       if (mm == swap_token_mm) {
                mm->token_priority += 2;
+               goto update_priority;
+       }
+
+       if (current_interval < mm->last_interval)
+               mm->token_priority++;
+       else {
+               if (likely(mm->token_priority > 0))
+                       mm->token_priority--;
        }
 
+       /* Check if we deserve the token */
+       if (mm->token_priority > swap_token_mm->token_priority)
+               goto replace_token;
+
+update_priority:
+       trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
+
 out:
        mm->faultstamp = global_faults;
        mm->last_interval = current_interval;
        spin_unlock(&swap_token_lock);
+       return;
+
+replace_token:
+       mm->token_priority += 2;
+       trace_replace_swap_token(swap_token_mm, mm);
+       swap_token_mm = mm;
+       swap_token_memcg = swap_token_memcg_from_mm(mm);
+       last_aging = global_faults;
+       goto out;
 }
 
 /* Called on process exit. */
 void __put_swap_token(struct mm_struct *mm)
 {
        spin_lock(&swap_token_lock);
-       if (likely(mm == swap_token_mm))
+       if (likely(mm == swap_token_mm)) {
+               trace_put_swap_token(swap_token_mm);
                swap_token_mm = NULL;
+               swap_token_memcg = NULL;
+       }
        spin_unlock(&swap_token_lock);
 }
+
+static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
+{
+       if (!a)
+               return true;
+       if (!b)
+               return true;
+       if (a == b)
+               return true;
+       return false;
+}
+
+void disable_swap_token(struct mem_cgroup *memcg)
+{
+       /* memcg reclaim doesn't disable an unrelated mm's token. */
+       if (match_memcg(memcg, swap_token_memcg)) {
+               spin_lock(&swap_token_lock);
+               if (match_memcg(memcg, swap_token_memcg)) {
+                       trace_disable_swap_token(swap_token_mm);
+                       swap_token_mm = NULL;
+                       swap_token_memcg = NULL;
+               }
+               spin_unlock(&swap_token_lock);
+       }
+}
index faa0a088f9cc83a5a4cd7d05e40924144c795c19..8ff834e19c2460d33844e43154b058974a6c9849 100644 (file)
@@ -1124,8 +1124,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                        nr_lumpy_dirty++;
                                scan++;
                        } else {
-                               /* the page is freed already. */
-                               if (!page_count(cursor_page))
+                               /*
+                                * Check if the page is freed already.
+                                *
+                                * We can't use page_count() as that
+                                * requires compound_head and we don't
+                                * have a pin on the page here. If a
+                                * page is tail, we may or may not
+                                * have isolated the head, so assume
+                                * it's not free, it'd be tricky to
+                                * track the head status without a
+                                * page pin.
+                                */
+                               if (!PageTail(cursor_page) &&
+                                   !atomic_read(&cursor_page->_count))
                                        continue;
                                break;
                        }
@@ -2081,7 +2093,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
                if (!priority)
-                       disable_swap_token();
+                       disable_swap_token(sc->mem_cgroup);
                total_scanned += shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
@@ -2407,7 +2419,7 @@ loop_again:
 
                /* The swap token gets in the way of swapout... */
                if (!priority)
-                       disable_swap_token();
+                       disable_swap_token(NULL);
 
                all_zones_ok = 1;
                balanced = 0;
index 41495dc2a4c9a2649579938a1894916efb723684..fcc684678af67e644c6cc043dacee9f4159fb5b4 100644 (file)
@@ -23,6 +23,31 @@ bool vlan_do_receive(struct sk_buff **skbp)
                return false;
 
        skb->dev = vlan_dev;
+       if (skb->pkt_type == PACKET_OTHERHOST) {
+               /* Our lower layer thinks this is not local, let's make sure.
+                * This allows the VLAN to have a different MAC than the
+                * underlying device, and still route correctly. */
+               if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+                                       vlan_dev->dev_addr))
+                       skb->pkt_type = PACKET_HOST;
+       }
+
+       if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+               unsigned int offset = skb->data - skb_mac_header(skb);
+
+               /*
+                * vlan_insert_tag expects skb->data to point to the mac
+                * header, so move skb->data before calling it and restore
+                * the original position afterwards.
+                */
+               skb_push(skb, offset);
+               skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+               if (!skb)
+                       return false;
+               skb_pull(skb, offset + VLAN_HLEN);
+               skb_reset_mac_len(skb);
+       }
+
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
@@ -31,22 +56,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
-
-       switch (skb->pkt_type) {
-       case PACKET_BROADCAST:
-               break;
-       case PACKET_MULTICAST:
+       if (skb->pkt_type == PACKET_MULTICAST)
                rx_stats->rx_multicast++;
-               break;
-       case PACKET_OTHERHOST:
-               /* Our lower layer thinks this is not local, let's make sure.
-                * This allows the VLAN to have a different MAC than the
-                * underlying device, and still route correctly. */
-               if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-                                       vlan_dev->dev_addr))
-                       skb->pkt_type = PACKET_HOST;
-               break;
-       }
        u64_stats_update_end(&rx_stats->syncp);
 
        return true;
@@ -89,18 +100,13 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 }
 EXPORT_SYMBOL(vlan_gro_frags);
 
-static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-       if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
-               if (skb_cow(skb, skb_headroom(skb)) < 0)
-                       skb = NULL;
-               if (skb) {
-                       /* Lifted from Gleb's VLAN code... */
-                       memmove(skb->data - ETH_HLEN,
-                               skb->data - VLAN_ETH_HLEN, 12);
-                       skb->mac_header += VLAN_HLEN;
-               }
-       }
+       if (skb_cow(skb, skb_headroom(skb)) < 0)
+               return NULL;
+       memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+       skb->mac_header += VLAN_HLEN;
+       skb_reset_mac_len(skb);
        return skb;
 }
 
@@ -161,7 +167,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
        skb_pull_rcsum(skb, VLAN_HLEN);
        vlan_set_encap_proto(skb, vhdr);
 
-       skb = vlan_check_reorder_header(skb);
+       skb = vlan_reorder_header(skb);
        if (unlikely(!skb))
                goto err_free;
 
index f247f5bff88ddb61d252951f57a965789e63f45d..7ea5cf9ea08a9db7f3b6edaf083f14030914463c 100644 (file)
@@ -165,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
-               u64_stats_update_begin(&stats->syncp);
+               u64_stats_update_end(&stats->syncp);
        } else {
                this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
        }
index a86f9ba4f05cd473b6ccd20c17a1d293a459877a..e64a1c2df238d3fc0aac368075b8b0fd3bbb6917 100644 (file)
@@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
                if (c->psm == psm) {
                        /* Exact match. */
                        if (!bacmp(&bt_sk(sk)->src, src)) {
-                               read_unlock_bh(&chan_list_lock);
+                               read_unlock(&chan_list_lock);
                                return c;
                        }
 
index 3fa123185e891b93cd98b3ac23bdec3518b6f066..56149ec36d7fd5d8a411b3996f5e7985a3792c77 100644 (file)
@@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+       return NULL;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
+       .cow_metrics =          fake_cow_metrics,
 };
 
 /*
index 649ebacaf6bce69f29028e33c20233fb1932bbee..adbb424403d48982624f36e62418ec9cb47c0132 100644 (file)
@@ -139,17 +139,14 @@ static void close_work(struct work_struct *work)
        struct chnl_net *dev = NULL;
        struct list_head *list_node;
        struct list_head *_tmp;
-       /* May be called with or without RTNL lock held */
-       int islocked = rtnl_is_locked();
-       if (!islocked)
-               rtnl_lock();
+
+       rtnl_lock();
        list_for_each_safe(list_node, _tmp, &chnl_net_list) {
                dev = list_entry(list_node, struct chnl_net, list_field);
                if (dev->state == CAIF_SHUTDOWN)
                        dev_close(dev->netdev);
        }
-       if (!islocked)
-               rtnl_unlock();
+       rtnl_unlock();
 }
 static DECLARE_WORK(close_worker, close_work);
 
index 6ea2b892f44b8ab498cdec7344fc07c73b9a6819..9cb627a4073aef3007afe4dd600ae96d85f0af0b 100644 (file)
@@ -1144,6 +1144,13 @@ static void handle_osds_timeout(struct work_struct *work)
                              round_jiffies_relative(delay));
 }
 
+static void complete_request(struct ceph_osd_request *req)
+{
+       if (req->r_safe_callback)
+               req->r_safe_callback(req, NULL);
+       complete_all(&req->r_safe_completion);  /* fsync waiter */
+}
+
 /*
  * handle osd op reply.  either call the callback if it is specified,
  * or do the completion to wake up the waiting thread.
@@ -1226,11 +1233,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        else
                complete_all(&req->r_completion);
 
-       if (flags & CEPH_OSD_FLAG_ONDISK) {
-               if (req->r_safe_callback)
-                       req->r_safe_callback(req, msg);
-               complete_all(&req->r_safe_completion);  /* fsync waiter */
-       }
+       if (flags & CEPH_OSD_FLAG_ONDISK)
+               complete_request(req);
 
 done:
        dout("req=%p req->r_linger=%d\n", req, req->r_linger);
@@ -1732,6 +1736,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                __cancel_request(req);
                __unregister_request(osdc, req);
                mutex_unlock(&osdc->request_mutex);
+               complete_request(req);
                dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
                return rc;
        }
index c7e305d13b71b9371aa8973354c2b4cbe09889f6..9c58c1ec41a9dba24cca0318f723b01208c7b880 100644 (file)
@@ -2096,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 {
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc = NETDEV_TX_OK;
+       unsigned int skb_len;
 
        if (likely(!skb->next)) {
                u32 features;
@@ -2146,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        }
                }
 
+               skb_len = skb->len;
                rc = ops->ndo_start_xmit(skb, dev);
-               trace_net_dev_xmit(skb, rc);
+               trace_net_dev_xmit(skb, rc, dev, skb_len);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
@@ -2167,8 +2169,9 @@ gso:
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(nskb);
 
+               skb_len = nskb->len;
                rc = ops->ndo_start_xmit(nskb, dev);
-               trace_net_dev_xmit(nskb, rc);
+               trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
                                goto out_kfree_gso_skb;
@@ -3111,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
-       skb->mac_len = skb->network_header - skb->mac_header;
+       skb_reset_mac_len(skb);
 
        pt_prev = NULL;
 
@@ -6175,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
+       /* Append NAPI poll list from offline CPU. */
+       if (!list_empty(&oldsd->poll_list)) {
+               list_splice_init(&oldsd->poll_list, &sd->poll_list);
+               raise_softirq_irqoff(NET_RX_SOFTIRQ);
+       }
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();
@@ -6261,29 +6269,23 @@ err_name:
 /**
  *     netdev_drivername - network driver for the device
  *     @dev: network device
- *     @buffer: buffer for resulting name
- *     @len: size of buffer
  *
  *     Determine network driver for device.
  */
-char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
+const char *netdev_drivername(const struct net_device *dev)
 {
        const struct device_driver *driver;
        const struct device *parent;
-
-       if (len <= 0 || !buffer)
-               return buffer;
-       buffer[0] = 0;
+       const char *empty = "";
 
        parent = dev->dev.parent;
-
        if (!parent)
-               return buffer;
+               return empty;
 
        driver = parent->driver;
        if (driver && driver->name)
-               strlcpy(buffer, driver->name, len);
-       return buffer;
+               return driver->name;
+       return empty;
 }
 
 static int __netdev_printk(const char *level, const struct net_device *dev,
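
The dev.c hunks above save skb->len into skb_len before calling ndo_start_xmit(), because the driver may free the skb and the tracepoint must not touch it afterwards. The general pattern, copy out anything you still need before transferring ownership, is sketched below in plain userspace C with an invented buffer type:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	size_t len;
	unsigned char *data;
};

/* The consumer takes ownership and may free the buffer. */
static int consume(struct buf *b)
{
	free(b->data);
	free(b);
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	b->len = 1500;
	b->data = malloc(b->len);

	size_t len = b->len;	/* save before ownership transfer */
	int rc = consume(b);	/* b must not be touched after this */

	printf("sent %zu bytes, rc=%d\n", len, rc);
	return 0;
}
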
index 11b98bc2aa8f4db7805cfe14f2087f5d8ced9824..33d2a1fba131695451de05e191ae9c10fb2cd307 100644 (file)
@@ -1179,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
 #endif
 }
 
-static const void *net_current_ns(void)
+static void *net_grab_current_ns(void)
 {
-       return current->nsproxy->net_ns;
+       struct net *ns = current->nsproxy->net_ns;
+#ifdef CONFIG_NET_NS
+       if (ns)
+               atomic_inc(&ns->passive);
+#endif
+       return ns;
 }
 
 static const void *net_initial_ns(void)
@@ -1196,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
 
 struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
-       .current_ns = net_current_ns,
+       .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
+       .drop_ns = net_drop_ns,
 };
 EXPORT_SYMBOL_GPL(net_ns_type_operations);
 
-static void net_kobj_ns_exit(struct net *net)
-{
-       kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
-}
-
-static struct pernet_operations kobj_net_ops = {
-       .exit = net_kobj_ns_exit,
-};
-
-
 #ifdef CONFIG_HOTPLUG
 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
 {
@@ -1339,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
 int netdev_kobject_init(void)
 {
        kobj_ns_type_register(&net_ns_type_operations);
-       register_pernet_subsys(&kobj_net_ops);
        return class_register(&net_class);
 }
index 6c6b86d0da155c057488cc66fcea108bebf7f391..ea489db1bc2361c20001576a5e909aa22cb689d2 100644 (file)
@@ -128,6 +128,7 @@ static __net_init int setup_net(struct net *net)
        LIST_HEAD(net_exit_list);
 
        atomic_set(&net->count, 1);
+       atomic_set(&net->passive, 1);
 
 #ifdef NETNS_REFCNT_DEBUG
        atomic_set(&net->use_count, 0);
@@ -210,6 +211,13 @@ static void net_free(struct net *net)
        kmem_cache_free(net_cachep, net);
 }
 
+void net_drop_ns(void *p)
+{
+       struct net *ns = p;
+       if (ns && atomic_dec_and_test(&ns->passive))
+               net_free(ns);
+}
+
 struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 {
        struct net *net;
@@ -230,7 +238,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
        }
        mutex_unlock(&net_mutex);
        if (rv < 0) {
-               net_free(net);
+               net_drop_ns(net);
                return ERR_PTR(rv);
        }
        return net;
@@ -286,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
-               net_free(net);
+               net_drop_ns(net);
        }
 }
 static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -310,19 +318,17 @@ struct net *get_net_ns_by_fd(int fd)
        struct file *file;
        struct net *net;
 
-       net = ERR_PTR(-EINVAL);
        file = proc_ns_fget(fd);
-       if (!file)
-               goto out;
+       if (IS_ERR(file))
+               return ERR_CAST(file);
 
        ei = PROC_I(file->f_dentry->d_inode);
-       if (ei->ns_ops != &netns_operations)
-               goto out;
+       if (ei->ns_ops == &netns_operations)
+               net = get_net(ei->ns);
+       else
+               net = ERR_PTR(-EINVAL);
 
-       net = get_net(ei->ns);
-out:
-       if (file)
-               fput(file);
+       fput(file);
        return net;
 }
 
index 2d7d6d4737810c0bd9d0b93defbf2c972f1c6b2e..18d9cbda3a39c0b19d0751350489c08249cba635 100644 (file)
@@ -792,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
                return -ENODEV;
        }
 
+       if (ndev->master) {
+               printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
+                      np->name, np->dev_name);
+               err = -EBUSY;
+               goto put;
+       }
+
        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;
 
index cc1463156cd037ad08b6a2f637c90b4d43fc5e7a..9c1926027a268162a3c37374a5f3397661c6dba6 100644 (file)
@@ -465,6 +465,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_len < sizeof(struct sockaddr_in))
                goto out;
 
+       if (addr->sin_family != AF_INET)
+               goto out;
+
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
 
        /* Not specified by any standard per-se, however it breaks too
index c3118e1cd3bb502c293748e40b2c3ccc648c8b94..ec93335901ddc45d09f7c85c53270ea9a6772207 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/icmp.h>
@@ -350,7 +351,7 @@ int ip_options_compile(struct net *net,
                                goto error;
                        }
                        if (optptr[2] <= optlen) {
-                               __be32 *timeptr = NULL;
+                               unsigned char *timeptr = NULL;
                                if (optptr[2]+3 > optptr[1]) {
                                        pp_ptr = optptr + 2;
                                        goto error;
@@ -359,7 +360,7 @@ int ip_options_compile(struct net *net,
                                      case IPOPT_TS_TSONLY:
                                        opt->ts = optptr - iph;
                                        if (skb)
-                                               timeptr = (__be32*)&optptr[optptr[2]-1];
+                                               timeptr = &optptr[optptr[2]-1];
                                        opt->ts_needtime = 1;
                                        optptr[2] += 4;
                                        break;
@@ -371,7 +372,7 @@ int ip_options_compile(struct net *net,
                                        opt->ts = optptr - iph;
                                        if (rt)  {
                                                memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
-                                               timeptr = (__be32*)&optptr[optptr[2]+3];
+                                               timeptr = &optptr[optptr[2]+3];
                                        }
                                        opt->ts_needaddr = 1;
                                        opt->ts_needtime = 1;
@@ -389,7 +390,7 @@ int ip_options_compile(struct net *net,
                                                if (inet_addr_type(net, addr) == RTN_UNICAST)
                                                        break;
                                                if (skb)
-                                                       timeptr = (__be32*)&optptr[optptr[2]+3];
+                                                       timeptr = &optptr[optptr[2]+3];
                                        }
                                        opt->ts_needtime = 1;
                                        optptr[2] += 8;
@@ -403,10 +404,10 @@ int ip_options_compile(struct net *net,
                                }
                                if (timeptr) {
                                        struct timespec tv;
-                                       __be32  midtime;
+                                       u32  midtime;
                                        getnstimeofday(&tv);
-                                       midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
-                                       memcpy(timeptr, &midtime, sizeof(__be32));
+                                       midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+                                       put_unaligned_be32(midtime, timeptr);
                                        opt->is_changed = 1;
                                }
                        } else {
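
The ip_options.c hunks above stop treating the timestamp slot as an aligned __be32 and instead compute the milliseconds-since-midnight value and store it with put_unaligned_be32(), byte by byte, at whatever offset the option dictates. A userspace sketch of the same computation and unaligned big-endian store (the helper name put_be32 is invented) is:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static void put_be32(uint32_t val, unsigned char *p)
{
	/* Store most significant byte first, one byte at a time. */
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	struct timespec tv;
	unsigned char opt[8];

	clock_gettime(CLOCK_REALTIME, &tv);
	uint32_t midtime = (uint32_t)((tv.tv_sec % 86400) * 1000 +
				      tv.tv_nsec / 1000000);
	put_be32(midtime, opt + 1);	/* odd offset: deliberately unaligned */
	printf("ms since midnight UTC: %u\n", midtime);
	return 0;
}
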
index 98af3697c7188870effc865e3caaef76e579a809..a8024eaa0e87b15ebdc86058a87c24f0813a610a 100644 (file)
@@ -799,7 +799,9 @@ static int __ip_append_data(struct sock *sk,
        int csummode = CHECKSUM_NONE;
        struct rtable *rt = (struct rtable *)cork->dst;
 
-       exthdrlen = transhdrlen ? rt->dst.header_len : 0;
+       skb = skb_peek_tail(queue);
+
+       exthdrlen = !skb ? rt->dst.header_len : 0;
        length += exthdrlen;
        transhdrlen += exthdrlen;
        mtu = cork->fragsize;
@@ -825,8 +827,6 @@ static int __ip_append_data(struct sock *sk,
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;
 
-       skb = skb_peek_tail(queue);
-
        cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
index d2c1311cb28d6aee7b218351580210b4e5783a3e..f7f9bd7ba12d8b4f6d0f938835efac7e34dfbed5 100644 (file)
@@ -402,7 +402,8 @@ ipq_dev_drop(int ifindex)
 static inline void
 __ipq_rcv_skb(struct sk_buff *skb)
 {
-       int status, type, pid, flags, nlmsglen, skblen;
+       int status, type, pid, flags;
+       unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
 
        skblen = skb->len;
index d609ac3cb9a450e69d47e4d5bdb7365347cbb46a..5c9e97c790172984b7d87bd2d2f40b61028184cc 100644 (file)
@@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
         * error messages (RELATED) and information requests (see below) */
        if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
            (ctinfo == IP_CT_RELATED ||
-            ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY))
+            ctinfo == IP_CT_RELATED_REPLY))
                return XT_CONTINUE;
 
        /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
                        ct->mark = hash;
                        break;
                case IP_CT_RELATED:
-               case IP_CT_RELATED+IP_CT_IS_REPLY:
+               case IP_CT_RELATED_REPLY:
                        /* FIXME: we don't handle expectations at the
                         * moment.  they can arrive on a different node than
                         * the master connection (e.g. FTP passive mode) */
                case IP_CT_ESTABLISHED:
-               case IP_CT_ESTABLISHED+IP_CT_IS_REPLY:
+               case IP_CT_ESTABLISHED_REPLY:
                        break;
                default:
                        break;
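
This and several later hunks replace open-coded sums such as IP_CT_RELATED + IP_CT_IS_REPLY with the named enumerators IP_CT_RELATED_REPLY and IP_CT_ESTABLISHED_REPLY. The sketch below spells out the numeric relationship those renames assume; the values mirror enum ip_conntrack_info as I understand it and are illustrative rather than authoritative:

/* Illustrative only: the conntrack-info values the *_REPLY aliases stand for. */
#include <stdio.h>

enum ip_conntrack_info_sketch {
        IP_CT_ESTABLISHED,      /* 0 */
        IP_CT_RELATED,          /* 1 */
        IP_CT_NEW,              /* 2 */
        IP_CT_IS_REPLY,         /* 3 */
        IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,  /* 3 */
        IP_CT_RELATED_REPLY     = IP_CT_RELATED + IP_CT_IS_REPLY,      /* 4 */
};

int main(void)
{
        printf("ESTABLISHED_REPLY=%d RELATED_REPLY=%d\n",
               IP_CT_ESTABLISHED_REPLY, IP_CT_RELATED_REPLY);
        return 0;
}
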
index d2ed9dc74ebc3c029e134295f4ed490f27af0ef6..9931152a78b54e4512bc95bbbe720258ebb44d09 100644 (file)
@@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        nat = nfct_nat(ct);
 
        NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-                           ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+                           ctinfo == IP_CT_RELATED_REPLY));
 
        /* Source address is 0.0.0.0 - locally generated packet that is
         * probably not supposed to be masqueraded.
index 5a03c02af999a45ac7d3f9e403ea1c2d38bc0c5a..db10075dd88e4720e1ba97bf7365b424e2fb124b 100644 (file)
@@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 
        /* This is where we call the helper: as the packet goes out. */
        ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
+       if (!ct || ctinfo == IP_CT_RELATED_REPLY)
                goto out;
 
        help = nfct_help(ct);
index 7404bde959943d4fbbcee7901f3006bb4a971574..ab5b27a2916f70b79430f1a90ac7d7af6cceaebe 100644 (file)
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        /* Update skb to refer to this connection */
        skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
        skb->nfctinfo = *ctinfo;
-       return -NF_ACCEPT;
+       return NF_ACCEPT;
 }
 
 /* Small and modified version of icmp_rcv */
index 9c71b2755ce3e4eea5331fe17a49ec1701dccf02..3346de5d94d009ffe3466ce61f1103d5f66a5ca2 100644 (file)
@@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 
        /* Must be RELATED */
        NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
-                    skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
+                    skb->nfctinfo == IP_CT_RELATED_REPLY);
 
        /* Redirects on non-null nats must be dropped, else they'll
           start talking to each other without our translation, and be
index 99cfa28b6d38f12fef3e1e94c07223d83e066e14..ebc5f8894f99eb06b8ef5616cafc06b427dd6a42 100644 (file)
@@ -160,7 +160,7 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                if (!(rt->rt_flags & RTCF_LOCAL) &&
-                   skb->dev->features & NETIF_F_V4_CSUM) {
+                   (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_start = skb_headroom(skb) +
                                          skb_network_offset(skb) +
index 21c30426480b0d08cdc10a0257ee2f6ce736840f..733c9abc1cbd9ddb7c45a0b7ffae9d72f608ae62 100644 (file)
@@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
 
        /* Connection must be valid and new. */
        NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-                           ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+                           ctinfo == IP_CT_RELATED_REPLY));
        NF_CT_ASSERT(par->out != NULL);
 
        return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
index 7317bdf1d457544daeef7dc05dce0b3dc23f7a2a..483b76d042da5ad2836005ab1dad3d1c17727c61 100644 (file)
@@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
 
        switch (ctinfo) {
        case IP_CT_RELATED:
-       case IP_CT_RELATED+IP_CT_IS_REPLY:
+       case IP_CT_RELATED_REPLY:
                if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(ct, ctinfo,
                                                           hooknum, skb))
@@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
        default:
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
-                            ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
+                            ctinfo == IP_CT_ESTABLISHED_REPLY);
        }
 
        return nf_nat_packet(ct, ctinfo, hooknum, skb);
index 52b0b956508b93046dd0cc3ef9dfd157f3d57ea5..045f0ec6a4a02a257cd60760283cdd3fb8cd5fda 100644 (file)
@@ -1316,6 +1316,23 @@ reject_redirect:
        ;
 }
 
+static bool peer_pmtu_expired(struct inet_peer *peer)
+{
+       unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+
+       return orig &&
+              time_after_eq(jiffies, orig) &&
+              cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+}
+
+static bool peer_pmtu_cleaned(struct inet_peer *peer)
+{
+       unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+
+       return orig &&
+              cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+}
+
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *)dst;
@@ -1331,14 +1348,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
                                                rt_genid(dev_net(dst->dev)));
                        rt_del(hash, rt);
                        ret = NULL;
-               } else if (rt->peer &&
-                          rt->peer->pmtu_expires &&
-                          time_after_eq(jiffies, rt->peer->pmtu_expires)) {
-                       unsigned long orig = rt->peer->pmtu_expires;
-
-                       if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
-                               dst_metric_set(dst, RTAX_MTU,
-                                              rt->peer->pmtu_orig);
+               } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
+                       dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
                }
        }
        return ret;
@@ -1531,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
 
 static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
 {
-       unsigned long expires = peer->pmtu_expires;
+       unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
 
+       if (!expires)
+               return;
        if (time_before(jiffies, expires)) {
                u32 orig_dst_mtu = dst_mtu(dst);
                if (peer->pmtu_learned < orig_dst_mtu) {
@@ -1555,10 +1568,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
                rt_bind_peer(rt, rt->rt_dst, 1);
        peer = rt->peer;
        if (peer) {
+               unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
+
                if (mtu < ip_rt_min_pmtu)
                        mtu = ip_rt_min_pmtu;
-               if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
-                       unsigned long pmtu_expires;
+               if (!pmtu_expires || mtu < peer->pmtu_learned) {
 
                        pmtu_expires = jiffies + ip_rt_mtu_expires;
                        if (!pmtu_expires)
@@ -1612,13 +1626,14 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
                        rt_bind_peer(rt, rt->rt_dst, 0);
 
                peer = rt->peer;
-               if (peer && peer->pmtu_expires)
+               if (peer) {
                        check_peer_pmtu(dst, peer);
 
-               if (peer && peer->redirect_learned.a4 &&
-                   peer->redirect_learned.a4 != rt->rt_gateway) {
-                       if (check_peer_redir(dst, peer))
-                               return NULL;
+                       if (peer->redirect_learned.a4 &&
+                           peer->redirect_learned.a4 != rt->rt_gateway) {
+                               if (check_peer_redir(dst, peer))
+                                       return NULL;
+                       }
                }
 
                rt->rt_peer_genid = rt_peer_genid();
@@ -1649,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
        rt = skb_rtable(skb);
-       if (rt &&
-           rt->peer &&
-           rt->peer->pmtu_expires) {
-               unsigned long orig = rt->peer->pmtu_expires;
-
-               if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
-                       dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
-       }
+       if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
+               dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1770,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                               sizeof(u32) * RTAX_MAX);
                dst_init_metrics(&rt->dst, peer->metrics, false);
 
-               if (peer->pmtu_expires)
-                       check_peer_pmtu(&rt->dst, peer);
+               check_peer_pmtu(&rt->dst, peer);
                if (peer->redirect_learned.a4 &&
                    peer->redirect_learned.a4 != rt->rt_gateway) {
                        rt->rt_gateway = peer->redirect_learned.a4;
@@ -2775,7 +2783,8 @@ static int rt_fill_info(struct net *net,
        struct rtable *rt = skb_rtable(skb);
        struct rtmsg *r;
        struct nlmsghdr *nlh;
-       long expires;
+       long expires = 0;
+       const struct inet_peer *peer = rt->peer;
        u32 id = 0, ts = 0, tsage = 0, error;
 
        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@@ -2823,15 +2832,16 @@ static int rt_fill_info(struct net *net,
                NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
 
        error = rt->dst.error;
-       expires = (rt->peer && rt->peer->pmtu_expires) ?
-               rt->peer->pmtu_expires - jiffies : 0;
-       if (rt->peer) {
+       if (peer) {
                inet_peer_refcheck(rt->peer);
-               id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
-               if (rt->peer->tcp_ts_stamp) {
-                       ts = rt->peer->tcp_ts;
-                       tsage = get_seconds() - rt->peer->tcp_ts_stamp;
+               id = atomic_read(&peer->ip_id_count) & 0xffff;
+               if (peer->tcp_ts_stamp) {
+                       ts = peer->tcp_ts;
+                       tsage = get_seconds() - peer->tcp_ts_stamp;
                }
+               expires = ACCESS_ONCE(peer->pmtu_expires);
+               if (expires)
+                       expires -= jiffies;
        }
 
        if (rt_is_input_route(rt)) {
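
The two new helpers at the top of this route.c hunk, peer_pmtu_expired() and peer_pmtu_cleaned(), centralise a claim-once pattern: read pmtu_expires once, then use cmpxchg() so that exactly one caller sees the non-zero stamp, resets it to zero, and restores the original MTU. A userspace sketch of the same idea using C11 atomics (names and the initial stamp are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pmtu_expires = 12345;      /* made-up stamp */

/* Returns 1 for exactly one caller; later callers see 0 and do nothing. */
static int claim_expiry(void)
{
        unsigned long orig = atomic_load(&pmtu_expires);

        return orig != 0 &&
               atomic_compare_exchange_strong(&pmtu_expires, &orig, 0UL);
}

int main(void)
{
        printf("first claim:  %d\n", claim_expiry());   /* 1 */
        printf("second claim: %d\n", claim_expiry());   /* 0 */
        return 0;
}
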
index b7919f901fbf76ce511313d39e638d69a18269ad..d450a2f9fc0645b7addef3fcbf2d586e89d86da7 100644 (file)
@@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;
+
+       if (addr->sin6_family != AF_INET6)
+               return -EINVAL;
+
        addr_type = ipv6_addr_type(&addr->sin6_addr);
        if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
                return -EINVAL;
index 413ab0754e1fe4923c139e632444f2cfcc623d86..065fe405fb58486430a1f81209d9d4b6ec31abdf 100644 (file)
@@ -403,7 +403,8 @@ ipq_dev_drop(int ifindex)
 static inline void
 __ipq_rcv_skb(struct sk_buff *skb)
 {
-       int status, type, pid, flags, nlmsglen, skblen;
+       int status, type, pid, flags;
+       unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
 
        skblen = skb->len;
index c8af58b225620795af240156ae1e5b735fa78a2d..4111050a9fc524e0cd225aa81d5dfa4c4e166d06 100644 (file)
@@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 
        /* This is where we call the helper: as the packet goes out. */
        ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
+       if (!ct || ctinfo == IP_CT_RELATED_REPLY)
                goto out;
 
        help = nfct_help(ct);
index 1df3c8b6bf4723668e6b8c43666b3e31f4583fd8..7c05e7eacbc6561744566168c4ff3c3f64fc828d 100644 (file)
@@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
        /* Update skb to refer to this connection */
        skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
        skb->nfctinfo = *ctinfo;
-       return -NF_ACCEPT;
+       return NF_ACCEPT;
 }
 
 static int
index 36477538cea8eba501f000253b433d673b8d579f..f876eed7d4aad3099869acad4670e102e1bf368e 100644 (file)
@@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
                         iriap_watchdog_timer_expired);
 }
 
+static struct lock_class_key irias_objects_key;
+
 /*
  * Function iriap_init (void)
  *
@@ -114,6 +116,9 @@ int __init iriap_init(void)
                return -ENOMEM;
        }
 
+       lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
+                                  "irias_objects");
+
        /*
         *  Register some default services for IrLMP
         */
index b8dbae82fab8612974d603c42b4cbac3b0509165..76130134bfa6215d3cb7ffce9813f95724dc7d65 100644 (file)
@@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
         */
        pd->net = get_net_ns_by_pid(current->pid);
        if (IS_ERR(pd->net)) {
-               rc = -PTR_ERR(pd->net);
+               rc = PTR_ERR(pd->net);
                goto err_free_pd;
        }
 
index 421eaa6b0c2b36cbb9a1131b78d995a248debaf0..56c24cabf26d3cb6552f157320e1e782acff16cf 100644 (file)
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->u.ibss.mtx);
 
+       sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+       memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+       sdata->u.ibss.ssid_len = 0;
+
        active_ibss = ieee80211_sta_active_ibss(sdata);
 
        if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        kfree_skb(skb);
 
        skb_queue_purge(&sdata->skb_queue);
-       memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-       sdata->u.ibss.ssid_len = 0;
 
        del_timer_sync(&sdata->u.ibss.timer);
 
index 2025af52b195647a3e83794ae090b8ca84a7cdfa..090b0ec1e05653c246973a74de8fe4c6057f261e 100644 (file)
@@ -775,9 +775,6 @@ struct ieee80211_local {
 
        int tx_headroom; /* required headroom for hardware/radiotap */
 
-       /* count for keys needing tailroom space allocation */
-       int crypto_tx_tailroom_needed_cnt;
-
        /* Tasklet and skb queue to process calls from IRQ mode. All frames
         * added to skb_queue will be processed, but frames in
         * skb_queue_unreliable may be dropped if the total length of these
index 49d4f869e0bc74cfcd397558b2283440da761c1d..dee30aea9ab31eba7021812894c35d495e28ab71 100644 (file)
@@ -1145,6 +1145,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                                + IEEE80211_ENCRYPT_HEADROOM;
        ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
 
+       ret = dev_alloc_name(ndev, ndev->name);
+       if (ret < 0)
+               goto fail;
+
        ieee80211_assign_perm_addr(local, ndev, type);
        memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
        SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
index 31afd712930df2a7507505a26cd24f4f4e1ee2bd..f825e2f0a57e034a839f37bfafc19e12206367b9 100644 (file)
@@ -101,11 +101,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
        if (!ret) {
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
-
-               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
-                       key->local->crypto_tx_tailroom_needed_cnt--;
-
                return 0;
        }
 
@@ -161,10 +156,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
                          key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
 
        key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
-       if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-             (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
-               key->local->crypto_tx_tailroom_needed_cnt++;
 }
 
 void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -403,10 +394,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
                ieee80211_aes_key_free(key->u.ccmp.tfm);
        if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
                ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
-       if (key->local) {
+       if (key->local)
                ieee80211_debugfs_key_remove(key);
-               key->local->crypto_tx_tailroom_needed_cnt--;
-       }
 
        kfree(key);
 }
@@ -468,8 +457,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
        ieee80211_debugfs_key_add(key);
 
-       key->local->crypto_tx_tailroom_needed_cnt++;
-
        ret = ieee80211_key_enable_hw_accel(key);
 
        mutex_unlock(&sdata->local->key_mtx);
@@ -511,12 +498,8 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->local->key_mtx);
 
-       sdata->local->crypto_tx_tailroom_needed_cnt = 0;
-
-       list_for_each_entry(key, &sdata->key_list, list) {
-               sdata->local->crypto_tx_tailroom_needed_cnt++;
+       list_for_each_entry(key, &sdata->key_list, list)
                ieee80211_key_enable_hw_accel(key);
-       }
 
        mutex_unlock(&sdata->local->key_mtx);
 }
index 4f6b2675e41d43fd8335e35094875b41774d9ca8..d595265d6c22c4a8e618daa17111b28da495836b 100644 (file)
@@ -1089,6 +1089,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
                local->hw.conf.flags &= ~IEEE80211_CONF_PS;
                config_changed |= IEEE80211_CONF_CHANGE_PS;
        }
+       local->ps_sdata = NULL;
 
        ieee80211_hw_config(local, config_changed);
 
index 27af6723cb5e5496efc606010bf04541806da43e..58ffa7d069c791c7d2c2c681861212d806260956 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/pm_qos_params.h>
-#include <linux/slab.h>
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
index 64e0f7587e6d6f09e11a8029ef7a995e406ff60d..3104c844b544c5465ef5ba1e7efcff05ca920501 100644 (file)
@@ -1480,7 +1480,12 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
 {
        int tail_need = 0;
 
-       if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
+       /*
+        * This could be optimised: devices that do full hardware
+        * crypto (including TKIP MMIC) need no tailroom, but we
+        * have no drivers for such devices currently.
+        */
+       if (may_encrypt) {
                tail_need = IEEE80211_ENCRYPT_TAILROOM;
                tail_need -= skb_tailroom(skb);
                tail_need = max_t(int, tail_need, 0);
index 8041befc65553588eda40e4a41e6b9e440a21367..42aa64b6b0b1b6c7ae87dbe7831776ea2d345366 100644 (file)
@@ -767,7 +767,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < ip_set_max; i++) {
                        if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
-                               ret = IPSET_ERR_BUSY;
+                               ret = -IPSET_ERR_BUSY;
                                goto out;
                        }
                }
index 4743e5402522fb6793c2022ade4bed020b240295..565a7c5b8818a22038f9a4f1b87c03a0cf8c31ad 100644 (file)
@@ -146,8 +146,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_ipportnet4_elem data =
-               { .cidr = h->nets[0].cidr || HOST_MASK };
+       struct hash_ipportnet4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
@@ -394,8 +395,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_ipportnet6_elem data =
-               { .cidr = h->nets[0].cidr || HOST_MASK };
+       struct hash_ipportnet6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
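
The initializer fix above (repeated below for the hash:net and hash:net,port set types) is about operator choice: `h->nets[0].cidr || HOST_MASK` is a logical OR and always evaluates to 0 or 1, so the intended fallback to HOST_MASK never happened; the conditional expression restores it. A tiny userspace sketch of the difference, assuming HOST_MASK is 32 as for IPv4:

#include <stdio.h>

#define HOST_MASK 32    /* assumed IPv4 value; the IPv6 variant uses 128 */

int main(void)
{
        unsigned char cidr = 0;                         /* nothing configured yet */

        unsigned char buggy = cidr || HOST_MASK;        /* logical OR: 0 or 1 */
        unsigned char fixed = cidr ? cidr : HOST_MASK;  /* fallback: 32 */

        printf("buggy=%u fixed=%u\n", buggy, fixed);    /* prints buggy=1 fixed=32 */
        return 0;
}
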
index c4db202b7da4bcaefc22e9c637e3a242233c5c9e..2aeeabcd5a211af76b3dd4c6c5eabda69ea97705 100644 (file)
@@ -131,7 +131,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
+       struct hash_net4_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
@@ -296,7 +298,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
+       struct hash_net6_elem data = {
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
index d2a40362dd3aadb6510ff1603a42e132eeeb5c7a..e50d9bb8820b24c7bb4bda72429eddd4ce746dfd 100644 (file)
@@ -144,7 +144,8 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem data = {
-               .cidr = h->nets[0].cidr || HOST_MASK };
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
@@ -357,7 +358,8 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport6_elem data = {
-               .cidr = h->nets[0].cidr || HOST_MASK };
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+       };
 
        if (data.cidr == 0)
                return -EINVAL;
index bfa808f4da137fbe4d112155db205633a3e042a8..55af2242bccd482b8cb762a8f3419cc922724791 100644 (file)
@@ -1772,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET,
                .hooknum        = NF_INET_LOCAL_IN,
-               .priority       = 99,
+               .priority       = NF_IP_PRI_NAT_SRC - 2,
        },
        /* After packet filtering, forward packet through VS/DR, VS/TUN,
         * or VS/NAT(change destination), so that filtering rules can be
@@ -1782,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET,
                .hooknum        = NF_INET_LOCAL_IN,
-               .priority       = 101,
+               .priority       = NF_IP_PRI_NAT_SRC - 1,
        },
        /* Before ip_vs_in, change source only for VS/NAT */
        {
@@ -1790,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET,
                .hooknum        = NF_INET_LOCAL_OUT,
-               .priority       = -99,
+               .priority       = NF_IP_PRI_NAT_DST + 1,
        },
        /* After mangle, schedule and forward local requests */
        {
@@ -1798,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET,
                .hooknum        = NF_INET_LOCAL_OUT,
-               .priority       = -98,
+               .priority       = NF_IP_PRI_NAT_DST + 2,
        },
        /* After packet filtering (but before ip_vs_out_icmp), catch icmp
         * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1824,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET6,
                .hooknum        = NF_INET_LOCAL_IN,
-               .priority       = 99,
+               .priority       = NF_IP6_PRI_NAT_SRC - 2,
        },
        /* After packet filtering, forward packet through VS/DR, VS/TUN,
         * or VS/NAT(change destination), so that filtering rules can be
@@ -1834,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET6,
                .hooknum        = NF_INET_LOCAL_IN,
-               .priority       = 101,
+               .priority       = NF_IP6_PRI_NAT_SRC - 1,
        },
        /* Before ip_vs_in, change source only for VS/NAT */
        {
@@ -1842,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET,
                .hooknum        = NF_INET_LOCAL_OUT,
-               .priority       = -99,
+               .priority       = NF_IP6_PRI_NAT_DST + 1,
        },
        /* After mangle, schedule and forward local requests */
        {
@@ -1850,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .owner          = THIS_MODULE,
                .pf             = PF_INET6,
                .hooknum        = NF_INET_LOCAL_OUT,
-               .priority       = -98,
+               .priority       = NF_IP6_PRI_NAT_DST + 2,
        },
        /* After packet filtering (but before ip_vs_out_icmp), catch icmp
         * destined for 0.0.0.0/0, which is for incoming IPVS connections */
index 2e1c11f784193b4b319b37f246cc8f7d6fded26b..f7af8b866017040600fb842d9441ff4fa9912fc7 100644 (file)
@@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 
        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
-               *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
+               *ctinfo = IP_CT_ESTABLISHED_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
@@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                        ret = -ret;
                        goto out;
                }
+               /* ICMP[v6] protocol trackers may assign one conntrack. */
+               if (skb->nfct)
+                       goto out;
        }
 
        ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
-               ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
+               ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;
 
index e17cb7c7dd8fda61efabf12d8cefb45c4c8d4a78..6f5801eac99923eaaa66e3dd8572678df0ac8ceb 100644 (file)
@@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
 
        /* Until there's been traffic both ways, don't look in packets. */
        if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+           ctinfo != IP_CT_ESTABLISHED_REPLY) {
                pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
                return NF_ACCEPT;
        }
index 18b2ce5c8ced1c82f5f4e3f41af1ed1e659079f9..f03c2d4539f6b8517530c899b2bc2419e4f80f88 100644 (file)
@@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
        int ret;
 
        /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+       if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
-       }
+
        pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
 
        spin_lock_bh(&nf_h323_lock);
@@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
        int ret;
 
        /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+       if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
-       }
+
        pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
 
        spin_lock_bh(&nf_h323_lock);
index b394aa318776447d243a2c95ed0aa0c86a754f4d..4f9390b98697e55ec00cd81c9c6d327a43e908d7 100644 (file)
@@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
                return NF_ACCEPT;
 
        /* Until there's been traffic both ways, don't look in packets. */
-       if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+       if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
 
        /* Not a full tcp header? */
index 088944824e135f53c769477aa201888cd343e398..2fd4565144defa5221bef7e458253de3d7df787f 100644 (file)
@@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
        u_int16_t msg;
 
        /* don't do any tracking before tcp handshake complete */
-       if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+       if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
 
        nexthdr_off = protoff;
index d9e27734b2a223adab2f73b199431df97e022b70..8501823b3f9b0f4af53b1392e465bb0215dee11d 100644 (file)
@@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
        ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
        /* Until there's been traffic both ways, don't look in packets. */
        if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
+           ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
 
        /* Not a full tcp header? */
index cb5a285817827a0ad036adba9ea08a34a6147dff..93faf6a3a6379d5f8c4c9e52fcaf91cebdfabf66 100644 (file)
@@ -1423,7 +1423,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
        if (ctinfo != IP_CT_ESTABLISHED &&
-           ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+           ctinfo != IP_CT_ESTABLISHED_REPLY)
                return NF_ACCEPT;
 
        /* No Data ? */
index 9cc46356b5773058c0554931bc84866a14113f75..fe39f7e913dff490e948ca47b1ce6c14844013d6 100644 (file)
@@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        ct = nf_ct_get(skb, &ctinfo);
        if (ct && !nf_ct_is_untracked(ct) &&
            ((iph->protocol != IPPROTO_ICMP &&
-             ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
+             ctinfo == IP_CT_ESTABLISHED_REPLY) ||
             (iph->protocol == IPPROTO_ICMP &&
-             ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) &&
+             ctinfo == IP_CT_RELATED_REPLY)) &&
            (ct->status & IPS_SRC_NAT_DONE)) {
 
                daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
index 925f715686a5d0e57c657ee6af40b5792c085a6d..c0c3cda19712d3a5a76307f5c85aefc98dc94957 100644 (file)
@@ -798,7 +798,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        getnstimeofday(&ts);
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
-               h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+               if (vlan_tx_tag_present(skb)) {
+                       h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+                       status |= TP_STATUS_VLAN_VALID;
+               } else {
+                       h.h2->tp_vlan_tci = 0;
+               }
+               h.h2->tp_padding = 0;
                hdrlen = sizeof(*h.h2);
                break;
        default:
@@ -1725,8 +1731,13 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
                aux.tp_snaplen = skb->len;
                aux.tp_mac = 0;
                aux.tp_net = skb_network_offset(skb);
-               aux.tp_vlan_tci = vlan_tx_tag_get(skb);
-
+               if (vlan_tx_tag_present(skb)) {
+                       aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+                       aux.tp_status |= TP_STATUS_VLAN_VALID;
+               } else {
+                       aux.tp_vlan_tci = 0;
+               }
+               aux.tp_padding = 0;
                put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
        }
 
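
Both af_packet hunks above report the VLAN tag only when one is actually present and flag that fact with TP_STATUS_VLAN_VALID, so a tag value of 0 is no longer ambiguous. A minimal consumer-side sketch; the structure is a trimmed stand-in for tpacket2_hdr and the flag value is assumed, so verify both against <linux/if_packet.h>:

#include <stdio.h>
#include <stdint.h>

#define TP_STATUS_VLAN_VALID (1 << 4)   /* assumed value; check linux/if_packet.h */

struct tpacket2_hdr_sketch {            /* trimmed stand-in for struct tpacket2_hdr */
        uint32_t tp_status;
        uint16_t tp_vlan_tci;
};

int main(void)
{
        struct tpacket2_hdr_sketch h = {
                .tp_status   = TP_STATUS_VLAN_VALID,
                .tp_vlan_tci = 100,
        };

        if (h.tp_status & TP_STATUS_VLAN_VALID)
                printf("frame tagged with VLAN %u\n", h.tp_vlan_tci);
        else
                printf("untagged frame (tp_vlan_tci of 0 is not meaningful)\n");
        return 0;
}
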
index b1721d71c27c909a8c852fd425e0b74f7c37cfbf..b4c680900d7a8c9b417d1c976ef838f9eb5eb960 100644 (file)
@@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
                        }
 
                        if (some_queue_timedout) {
-                               char drivername[64];
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
-                                      dev->name, netdev_drivername(dev, drivername, 64), i);
+                                      dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
index 525f97c467e97e3e04a3d279431fc0bf08ff0d2f..4a62888f2e43f43a7037f556257520a462d5ece2 100644 (file)
@@ -444,15 +444,7 @@ void sctp_association_free(struct sctp_association *asoc)
 
        asoc->peer.transport_count = 0;
 
-       /* Free any cached ASCONF_ACK chunk. */
-       sctp_assoc_free_asconf_acks(asoc);
-
-       /* Free the ASCONF queue. */
-       sctp_assoc_free_asconf_queue(asoc);
-
-       /* Free any cached ASCONF chunk. */
-       if (asoc->addip_last_asconf)
-               sctp_chunk_free(asoc->addip_last_asconf);
+       sctp_asconf_queue_teardown(asoc);
 
        /* AUTH - Free the endpoint shared keys */
        sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1646,3 +1638,16 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
 
        return NULL;
 }
+
+void sctp_asconf_queue_teardown(struct sctp_association *asoc)
+{
+       /* Free any cached ASCONF_ACK chunk. */
+       sctp_assoc_free_asconf_acks(asoc);
+
+       /* Free the ASCONF queue. */
+       sctp_assoc_free_asconf_queue(asoc);
+
+       /* Free any cached ASCONF chunk. */
+       if (asoc->addip_last_asconf)
+               sctp_chunk_free(asoc->addip_last_asconf);
+}
index d612ca1ca6c08fb979bbfad4f6252e27be729be6..534c2e5feb054c933cbd0dcf46a0cb66386646dd 100644 (file)
@@ -1670,6 +1670,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_SEND_NEXT_ASCONF:
                        sctp_cmd_send_asconf(asoc);
                        break;
+               case SCTP_CMD_PURGE_ASCONF_QUEUE:
+                       sctp_asconf_queue_teardown(asoc);
+                       break;
                default:
                        pr_warn("Impossible command: %u, %p\n",
                                cmd->verb, cmd->obj.ptr);
index 7f4a4f8368ee0df6b6ccab07ada25df655b7b521..a297283154d5035c6342e54916f10445aa610d83 100644 (file)
@@ -1718,11 +1718,21 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
                return SCTP_DISPOSITION_CONSUME;
        }
 
-       /* For now, fail any unsent/unacked data.  Consider the optional
-        * choice of resending of this data.
+       /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
+        * data. Consider the optional choice of resending of this data.
         */
+       sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+                       SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
        sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
 
+       /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
+        * and ASCONF-ACK cache.
+        */
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+                       SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+       sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+
        repl = sctp_make_cookie_ack(new_asoc, chunk);
        if (!repl)
                goto nomem;
index ec83f413a7ed19d41b3a37a0a7e0b1d7b1ce9988..98fa8eb6cc4bec27e6adbf737c3653392da32adc 100644 (file)
@@ -3406,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        i = 0;
        if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
-                       if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+                       if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                err = -EINVAL;
                                goto out_free;
                        }
-                       memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
                        request->ssids[i].ssid_len = nla_len(attr);
+                       memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
                        i++;
                }
        }
@@ -3572,14 +3572,13 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
                                    tmp) {
-                       if (request->ssids[i].ssid_len >
-                           IEEE80211_MAX_SSID_LEN) {
+                       if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
                                err = -EINVAL;
                                goto out_free;
                        }
+                       request->ssids[i].ssid_len = nla_len(attr);
                        memcpy(request->ssids[i].ssid, nla_data(attr),
                               nla_len(attr));
-                       request->ssids[i].ssid_len = nla_len(attr);
                        i++;
                }
        }
index 73a441d237b59ea89eaef14598299877459a3f61..7a6c67667d708e97400a6974175323f7cfd712e6 100644 (file)
@@ -267,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
        return memcmp(ssidie + 2, ssid, ssid_len) == 0;
 }
 
+static bool is_mesh_bss(struct cfg80211_bss *a)
+{
+       const u8 *ie;
+
+       if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
+               return false;
+
+       ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
+                             a->information_elements,
+                             a->len_information_elements);
+       if (!ie)
+               return false;
+
+       ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+                             a->information_elements,
+                             a->len_information_elements);
+       if (!ie)
+               return false;
+
+       return true;
+}
+
 static bool is_mesh(struct cfg80211_bss *a,
                    const u8 *meshid, size_t meshidlen,
                    const u8 *meshcfg)
 {
        const u8 *ie;
 
-       if (!WLAN_CAPABILITY_IS_MBSS(a->capability))
+       if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
                return false;
 
        ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -311,7 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
        if (a->channel != b->channel)
                return b->channel->center_freq - a->channel->center_freq;
 
-       if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) {
+       if (is_mesh_bss(a) && is_mesh_bss(b)) {
                r = cmp_ies(WLAN_EID_MESH_ID,
                            a->information_elements,
                            a->len_information_elements,
@@ -457,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                    struct cfg80211_internal_bss *res)
 {
        struct cfg80211_internal_bss *found = NULL;
-       const u8 *meshid, *meshcfg;
 
        /*
         * The reference to "res" is donated to this function.
@@ -470,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
        res->ts = jiffies;
 
-       if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
-               /* must be mesh, verify */
-               meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
-                                         res->pub.information_elements,
-                                         res->pub.len_information_elements);
-               meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
-                                          res->pub.information_elements,
-                                          res->pub.len_information_elements);
-               if (!meshid || !meshcfg ||
-                   meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
-                       /* bogus mesh */
-                       kref_put(&res->ref, bss_release);
-                       return NULL;
-               }
-       }
-
        spin_lock_bh(&dev->bss_lock);
 
        found = rb_find_bss(dev, res);
index 47f1b8638df9987dca1cf7ad2bbce5c674044881..b11ea692bd7d0870c8586e115bcfc6b6a69c2997 100644 (file)
@@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
                        bitnr = bitnr & 0x1F;
                        replay_esn->bmp[nr] |= (1U << bitnr);
                } else {
-                       nr = replay_esn->replay_window >> 5;
+                       nr = (replay_esn->replay_window - 1) >> 5;
                        for (i = 0; i <= nr; i++)
                                replay_esn->bmp[i] = 0;
 
@@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
                        bitnr = bitnr & 0x1F;
                        replay_esn->bmp[nr] |= (1U << bitnr);
                } else {
-                       nr = replay_esn->replay_window >> 5;
+                       nr = (replay_esn->replay_window - 1) >> 5;
                        for (i = 0; i <= nr; i++)
                                replay_esn->bmp[i] = 0;
 
index 490122c3e2aaf4e3e7ef8580cfbe9f2033c86644..40caf3c26cd5abdb3b22493715ebba38aa691a8a 100644 (file)
@@ -17,6 +17,7 @@ quiet_cmd_wrap = WRAP    $@
 cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
 
 all: $(patsubst %, $(obj)/%, $(generic-y))
+       @:
 
 $(obj)/%.h:
        $(call cmd,wrap)
index 8657f99bfb2b7d171df283d82092cba96731fcbd..b0aa2c680593d0c09857b9447d54ef933861ed3b 100755 (executable)
@@ -1943,6 +1943,11 @@ sub process {
                        WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
                }
 
+# check for uses of printk_ratelimit
+               if ($line =~ /\bprintk_ratelimit\s*\(/) {
+                       WARN("Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
+               }
+
 # printk should use KERN_* levels.  Note that follow on printk's on the
 # same line do not need a level, so we use the current block context
 # to try and find and validate the current printk.  In summary the current
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
new file mode 100755 (executable)
index 0000000..3b029cb
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# A depmod wrapper used by the toplevel Makefile
+
+if test $# -ne 2; then
+       echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+       exit 1
+fi
+DEPMOD=$1
+KERNELRELEASE=$2
+
+if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+       echo "Warning: you may need to install module-init-tools" >&2
+       echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+       sleep 1
+fi
+
+if ! test -r System.map -a -x "$DEPMOD"; then
+       exit 0
+fi
+# older versions of depmod require the version string to start with three
+# numbers, so we cheat with a symlink here
+depmod_hack_needed=true
+mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
+if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
+       if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
+               -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+               depmod_hack_needed=false
+       fi
+fi
+if $depmod_hack_needed; then
+       symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
+       ln -s "$KERNELRELEASE" "$symlink"
+       KERNELRELEASE=99.98.$KERNELRELEASE
+fi
+
+set -- -ae -F System.map
+if test -n "$INSTALL_MOD_PATH"; then
+       set -- "$@" -b "$INSTALL_MOD_PATH"
+fi
+"$DEPMOD" "$@" "$KERNELRELEASE"
+ret=$?
+
+if $depmod_hack_needed; then
+       rm -f "$symlink"
+fi
+
+exit $ret
index ec1bcecf2cdacd6009c79d6a809d4b7b8443f29d..3d2fd141dff76f078d171e2abf28fd4e97b5378c 100644 (file)
@@ -612,7 +612,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 static int apparmor_task_setrlimit(struct task_struct *task,
                unsigned int resource, struct rlimit *new_rlim)
 {
-       struct aa_profile *profile = aa_current_profile();
+       struct aa_profile *profile = __aa_current_profile();
        int error = 0;
 
        if (!unconfined(profile))
index d31862e0aa1c00f415d447e12e780482e2bdbca5..8e319a416eec261c2b8d15ab0d9ee7d70afa2aa5 100644 (file)
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
  * This is called in context of freshly forked kthread before kernel_execve(),
  * so we can simply install the desired session_keyring at this point.
  */
-static int umh_keys_init(struct subprocess_info *info)
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
 {
-       struct cred *cred = (struct cred*)current_cred();
        struct key *keyring = info->data;
 
        return install_session_keyring_to_cred(cred, keyring);
index a0d38459d650af32fe844ca18d4b5ff5ad03be98..20219ef5439ac69d63b61445a97403524e51c69b 100644 (file)
@@ -1476,7 +1476,6 @@ static int inode_has_perm(const struct cred *cred,
                          unsigned flags)
 {
        struct inode_security_struct *isec;
-       struct common_audit_data ad;
        u32 sid;
 
        validate_creds(cred);
@@ -1487,15 +1486,21 @@ static int inode_has_perm(const struct cred *cred,
        sid = cred_sid(cred);
        isec = inode->i_security;
 
-       if (!adp) {
-               adp = &ad;
-               COMMON_AUDIT_DATA_INIT(&ad, INODE);
-               ad.u.inode = inode;
-       }
-
        return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
 }
 
+static int inode_has_perm_noadp(const struct cred *cred,
+                               struct inode *inode,
+                               u32 perms,
+                               unsigned flags)
+{
+       struct common_audit_data ad;
+
+       COMMON_AUDIT_DATA_INIT(&ad, INODE);
+       ad.u.inode = inode;
+       return inode_has_perm(cred, inode, perms, &ad, flags);
+}
+
 /* Same as inode_has_perm, but pass explicit audit data containing
    the dentry to help the auditing code to more easily generate the
    pathname if needed. */
@@ -2122,8 +2127,8 @@ static inline void flush_unauthorized_files(const struct cred *cred,
                                                struct tty_file_private, list);
                        file = file_priv->file;
                        inode = file->f_path.dentry->d_inode;
-                       if (inode_has_perm(cred, inode,
-                                          FILE__READ | FILE__WRITE, NULL, 0)) {
+                       if (inode_has_perm_noadp(cred, inode,
+                                          FILE__READ | FILE__WRITE, 0)) {
                                drop_tty = 1;
                        }
                }
@@ -3228,7 +3233,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
         * new inode label or new policy.
         * This check is not redundant - do not remove.
         */
-       return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0);
+       return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
 }
 
 /* task security operations */
index 77d44138864fd5862a180c5907a81c2a52f1b6ab..35459340019e44399775c2f96aa9b5871f8d566c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/audit.h>
 #include <linux/uaccess.h>
 #include <linux/kobject.h>
+#include <linux/ctype.h>
 
 /* selinuxfs pseudo filesystem for exporting the security policy API.
    Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -751,6 +752,14 @@ out:
        return length;
 }
 
+static inline int hexcode_to_int(int code) {
+       if (code == '\0' || !isxdigit(code))
+               return -1;
+       if (isdigit(code))
+               return code - '0';
+       return tolower(code) - 'a' + 10;
+}
+
 static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 {
        char *scon = NULL, *tcon = NULL;
@@ -785,8 +794,34 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
        nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
        if (nargs < 3 || nargs > 4)
                goto out;
-       if (nargs == 4)
+       if (nargs == 4) {
+               /*
+                * If the name of the object being queried contains whitespace
+                * or multibyte characters, it must be encoded with the
+                * percent-encoding rule.  If it is not, the sscanf() above
+                * captures only the left half of the supplied name, which is
+                * unexpectedly split at the first whitespace.
+                */
+               char   *r, *w;
+               int     c1, c2;
+
+               r = w = namebuf;
+               do {
+                       c1 = *r++;
+                       if (c1 == '+')
+                               c1 = ' ';
+                       else if (c1 == '%') {
+                               if ((c1 = hexcode_to_int(*r++)) < 0)
+                                       goto out;
+                               if ((c2 = hexcode_to_int(*r++)) < 0)
+                                       goto out;
+                               c1 = (c1 << 4) | c2;
+                       }
+                       *w++ = c1;
+               } while (c1 != '\0');
+
                objname = namebuf;
+       }
 
        length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
        if (length)
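
For reference, the decode loop added above accepts '+' for a space and %XX hex escapes in the object name. A standalone userspace rendering of the same loop, fed a made-up encoded name:

#include <ctype.h>
#include <stdio.h>

static int hexcode_to_int(int code)
{
        if (code == '\0' || !isxdigit(code))
                return -1;
        return isdigit(code) ? code - '0' : tolower(code) - 'a' + 10;
}

int main(void)
{
        char buf[] = "a%20b+c";         /* made-up percent-encoded name */
        char *r = buf, *w = buf;
        int c1, c2;

        do {
                c1 = *r++;
                if (c1 == '+')
                        c1 = ' ';
                else if (c1 == '%') {
                        if ((c1 = hexcode_to_int(*r++)) < 0)
                                return 1;
                        if ((c2 = hexcode_to_int(*r++)) < 0)
                                return 1;
                        c1 = (c1 << 4) | c2;
                }
                *w++ = c1;
        } while (c1 != '\0');

        printf("decoded: \"%s\"\n", buf);       /* prints: decoded: "a b c" */
        return 0;
}
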
index 102e9ec1b77a327477592b05a8e60933d348d7c1..d246aca3f4fbd43563bf01d20d78e28feb826ad8 100644 (file)
@@ -3222,6 +3222,9 @@ static int filename_trans_write(struct policydb *p, void *fp)
        __le32 buf[1];
        int rc;
 
+       if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+               return 0;
+
        nel = 0;
        rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
        if (rc)
index 162a864dba24f51c55156d0733fcae3a88df2da6..9fc2e15841c96f54edbefb24a96c40405d06bedd 100644 (file)
@@ -138,7 +138,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
        }
        if (need_dev) {
                /* Get mount point or device file. */
-               if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+               if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
                        error = -ENOENT;
                        goto out;
                }
index 2c41825c836ec028c4725c2e038eb3937723c543..eb9fe2e1d291850f62a0db393535a3f394d4e9a0 100644 (file)
@@ -58,26 +58,6 @@ static const char *sanity_file_name(const char *path)
        else
                return path;
 }
-
-/* print file and line with a certain printk prefix */
-static int print_snd_pfx(unsigned int level, const char *path, int line,
-                        const char *format)
-{
-       const char *file = sanity_file_name(path);
-       char tmp[] = "<0>";
-       const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
-       int ret = 0;
-
-       if (format[0] == '<' && format[2] == '>') {
-               tmp[1] = format[1];
-               pfx = tmp;
-               ret = 1;
-       }
-       printk("%sALSA %s:%d: ", pfx, file, line);
-       return ret;
-}
-#else
-#define print_snd_pfx(level, path, line, format)       0
 #endif
 
 #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
@@ -85,15 +65,29 @@ void __snd_printk(unsigned int level, const char *path, int line,
                  const char *format, ...)
 {
        va_list args;
-       
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+       struct va_format vaf;
+       char verbose_fmt[] = KERN_DEFAULT "ALSA %s:%d %pV";
+#endif
+
 #ifdef CONFIG_SND_DEBUG        
        if (debug < level)
                return;
 #endif
+
        va_start(args, format);
-       if (print_snd_pfx(level, path, line, format))
-               format += 3; /* skip the printk level-prefix */
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+       vaf.fmt = format;
+       vaf.va = &args;
+       if (format[0] == '<' && format[2] == '>') {
+               memcpy(verbose_fmt, format, 3);
+               vaf.fmt = format + 3;
+       } else if (level)
+               memcpy(verbose_fmt, KERN_DEBUG, 3);
+       printk(verbose_fmt, sanity_file_name(path), line, &vaf);
+#else
        vprintk(format, args);
+#endif
        va_end(args);
 }
 EXPORT_SYMBOL_GPL(__snd_printk);
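
The __snd_printk() rewrite above leans on the kernel's struct va_format / %pV printk extension, which lets the wrapper prepend an "ALSA file:line" prefix and forward the caller's format string and va_list in a single printk() call, with no intermediate buffer. There is no %pV in userspace, so the sketch below approximates the same one-pass forwarding with vprintf(); the names are invented:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in: prepend an "ALSA file:line " prefix and forward the
 * caller's format and arguments in one pass (the kernel version does this
 * via struct va_format and the %pV printk extension). */
static void snd_printk_sketch(const char *file, int line, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        printf("ALSA %s:%d ", file, line);
        vprintf(fmt, args);
        va_end(args);
}

int main(void)
{
        snd_printk_sketch("misc.c", 42, "hello %s\n", "world");
        return 0;
}
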
index 86ee16ca365e6fc612a4ad25baf6ecbf9fbc6893..440030818db70c88c582d0047790baed074297c9 100644 (file)
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
                isight->packet_index = -1;
                return;
        }
+       fw_iso_context_queue_flush(isight->context);
 
        if (++index >= QUEUE_LENGTH)
                index = 0;
index fb311d8c05bff2365776260b089f878c7cb02ac6..5c6ea113d219cc2566a75d05d0d1e9b7f8272f2e 100644 (file)
@@ -60,7 +60,7 @@ struct code_header {
            HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER)))
 
 /***********************************************************************/
-#include "linux/pci.h"
+#include <linux/pci.h>
 /*-------------------------------------------------------------------*/
 short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
        u32 *pos_error_code)
index 5e619a84da061295fa4758e313935e89aea8009d..15f0161ce4a2342f9eb9b5eca86392a9481eacc2 100644 (file)
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         .ca0102_chip = 1,
         .spk71 = 1,
         .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
+       /* EMU0404 PCIe */
+       {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
+        .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+        .id = "EMU0404",
+        .emu10k2_chip = 1,
+        .ca0108_chip = 1,
+        .spk71 = 1,
+        .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
        /* Note that all E-mu cards require kernel 2.6 or newer. */
        {.vendor = 0x1102, .device = 0x0008,
         .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
index eacd4901a308d8b0b3c6a269ccd8c38bbf4f4f67..a7ec7030cf87e9773a1ce1f58ee318ace075498a 100644 (file)
@@ -1234,9 +1234,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
        sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
        if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
            (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-               if (snd_tea575x_init(&chip->tea))
+               if (snd_tea575x_init(&chip->tea)) {
                        snd_printk(KERN_ERR "TEA575x radio not found\n");
-       } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0)
+                       snd_fm801_free(chip);
+                       return -ENODEV;
+               }
+       } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
                /* autodetect tuner connection */
                for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
                        chip->tea575x_tuner = tea575x_tuner;
@@ -1246,6 +1249,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
                                break;
                        }
                }
+               if (tea575x_tuner == 4) {
+                       snd_printk(KERN_ERR "TEA575x radio not found\n");
+                       snd_fm801_free(chip);
+                       return -ENODEV;
+               }
+       }
        strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
 #endif
 
index f1de1bac042c260e5612a2f55f9ff13e79a30406..55f0647458c70aebb79028d3131e6f520d63eb19 100644 (file)
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
 int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
 void snd_hda_detach_beep_device(struct hda_codec *codec);
 #else
-#define snd_hda_attach_beep_device(...)                0
-#define snd_hda_detach_beep_device(...)
+static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+{
+       return 0;
+}
+static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
+{
+}
 #endif
 #endif
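
Replacing the variadic stub macros with static inline functions keeps the beep-disabled build type-safe: arguments are still evaluated and type-checked, and callers need no #ifdef guards. The same idiom for a hypothetical feature (struct my_dev and the my_feature_*() names are illustrative only, not HDA interfaces):

struct my_dev;

#ifdef CONFIG_MY_FEATURE
int my_feature_attach(struct my_dev *dev, int nid);
void my_feature_detach(struct my_dev *dev);
#else
/* Compiled-out stubs: still type-checked, generate no code, and avoid the
 * unused-variable warnings a bare #define my_feature_attach(...) 0 invites. */
static inline int my_feature_attach(struct my_dev *dev, int nid)
{
        return 0;
}
static inline void my_feature_detach(struct my_dev *dev)
{
}
#endif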
index 696ac2590307d7d46d8733a08cdef40cead16de7..d694e9d4921d85eef34f6588386c0235c9913e10 100644 (file)
@@ -506,9 +506,11 @@ static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
                                hda_nid_t hp)
 {
        struct ad198x_spec *spec = codec->spec;
-       snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
+       if (snd_hda_query_pin_caps(codec, front) & AC_PINCAP_EAPD)
+               snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
                            !spec->inv_eapd ? 0x00 : 0x02);
-       snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
+       if (snd_hda_query_pin_caps(codec, hp) & AC_PINCAP_EAPD)
+               snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
                            !spec->inv_eapd ? 0x00 : 0x02);
 }
 
@@ -524,6 +526,10 @@ static void ad198x_power_eapd(struct hda_codec *codec)
        case 0x11d4184a:
        case 0x11d4194a:
        case 0x11d4194b:
+       case 0x11d41988:
+       case 0x11d4198b:
+       case 0x11d4989a:
+       case 0x11d4989b:
                ad198x_power_eapd_write(codec, 0x12, 0x11);
                break;
        case 0x11d41981:
@@ -533,12 +539,6 @@ static void ad198x_power_eapd(struct hda_codec *codec)
        case 0x11d41986:
                ad198x_power_eapd_write(codec, 0x1b, 0x1a);
                break;
-       case 0x11d41988:
-       case 0x11d4198b:
-       case 0x11d4989a:
-       case 0x11d4989b:
-               ad198x_power_eapd_write(codec, 0x29, 0x22);
-               break;
        }
 }
 
index 3e6b9a8539c2ff4bb900b62bddaa2f5d4b008a2d..694b9daf691f74208b88a069623586af1ef92c0b 100644 (file)
@@ -3102,6 +3102,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
        SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
+       SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
        {}
 };
 
index 7a4e10002f56dcb5b2bf299c81bcc72b57a77cd2..61a774b3d3cb5e5ad14846815cb58304335835ba 100644 (file)
@@ -1141,6 +1141,13 @@ static void update_speakers(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
        int on;
 
+       /* Control HP pins/amps depending on master_mute state;
+        * in general, HP pins/amps control should be enabled in all cases,
+        * but currently set only for master_mute, just to be safe
+        */
+       do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
+                   spec->autocfg.hp_pins, spec->master_mute, true);
+
        if (!spec->automute)
                on = 0;
        else
@@ -6201,11 +6208,6 @@ static const struct snd_kcontrol_new alc260_input_mixer[] = {
 /* update HP, line and mono out pins according to the master switch */
 static void alc260_hp_master_update(struct hda_codec *codec)
 {
-       struct alc_spec *spec = codec->spec;
-
-       /* change HP pins */
-       do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
-                   spec->autocfg.hp_pins, spec->master_mute, true);
        update_speakers(codec);
 }
 
@@ -11924,7 +11926,7 @@ static const struct hda_verb alc262_nec_verbs[] = {
  *  0x1b = port replicator headphone out
  */
 
-#define ALC_HP_EVENT   0x37
+#define ALC_HP_EVENT   ALC880_HP_EVENT
 
 static const struct hda_verb alc262_fujitsu_unsol_verbs[] = {
        {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC_HP_EVENT},
@@ -13314,9 +13316,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
        spec->autocfg.hp_pins[0] = 0x15;
        spec->autocfg.speaker_pins[0] = 0x14;
-       spec->automute_mixer_nid[0] = 0x0f;
        spec->automute = 1;
-       spec->automute_mode = ALC_AUTOMUTE_MIXER;
+       spec->automute_mode = ALC_AUTOMUTE_AMP;
        spec->ext_mic.pin = 0x18;
        spec->ext_mic.mux_idx = 0;
        spec->int_mic.pin = 0x12;
@@ -13860,6 +13861,7 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
                                                ALC268_ACER_ASPIRE_ONE),
        SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
+       SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
        SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
                        "Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
        /* almost compatible with toshiba but with optional digital outs;
index 605c99e1e520de5d205a76046764d7e5f0f5a852..c952582fb21810933fc9891b687f516d84cf881e 100644 (file)
@@ -832,10 +832,13 @@ static int via_hp_build(struct hda_codec *codec)
        knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
        knew->private_value = nid;
 
-       knew = via_clone_control(spec, &via_hp_mixer[1]);
-       if (knew == NULL)
-               return -ENOMEM;
-       knew->subdevice = side_mute_channel(spec);
+       nid = side_mute_channel(spec);
+       if (nid) {
+               knew = via_clone_control(spec, &via_hp_mixer[1]);
+               if (knew == NULL)
+                       return -ENOMEM;
+               knew->subdevice = nid;
+       }
 
        return 0;
 }
index 34b24286d279d542e3d24e80a3531e8cf95bc8f5..2692e5ae5f2daa53822242c864a4f1b5ddf6a8ff 100644 (file)
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
        lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
 }
 
-static int lola_parse_tree(struct lola *chip)
+static int __devinit lola_parse_tree(struct lola *chip)
 {
        unsigned int val;
        int nid, err;
index 949691a876d3635798c6000da1b651d13684b1a6..3f08afc0f0d382b5b509c924abc2eb8532f4c822 100644 (file)
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
 
 /* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_OLD_REV     207
 #define HDSPM_MADI_REV         210
 #define HDSPM_RAYDAT_REV       211
 #define HDSPM_AIO_REV          212
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
 
                /* if wordclock has synced freq and wordclock is valid */
                if ((status2 & HDSPM_wcLock) != 0 &&
-                               (status & HDSPM_SelSyncRef0) == 0) {
+                               (status2 & HDSPM_SelSyncRef0) == 0) {
 
                        rate_bits = status2 & HDSPM_wcFreqMask;
 
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
                }
        }
        hmidi->pending = 0;
+       spin_unlock_irqrestore(&hmidi->lock, flags);
 
+       spin_lock_irqsave(&hmidi->hdspm->lock, flags);
        hmidi->hdspm->control_register |= hmidi->ie;
        hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
                    hmidi->hdspm->control_register);
+       spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
 
-       spin_unlock_irqrestore (&hmidi->lock, flags);
        return snd_hdspm_midi_output_write (hmidi);
 }
 
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
 
        switch (hdspm->firmware_rev) {
        case HDSPM_MADI_REV:
+       case HDSPM_MADI_OLD_REV:
                hdspm->io_type = MADI;
                hdspm->card_name = "RME MADI";
                hdspm->midiPorts = 3;
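
In the MIDI input hunk above, the per-port lock is dropped before the card-level lock is taken, rather than nesting the two while the control register is rewritten. A minimal sketch of that ordering with hypothetical structures (my_port and my_card are not the hdspm driver's own types):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_card {
        spinlock_t lock;
        u32 control_register;
};

struct my_port {
        spinlock_t lock;
        int pending;
        u32 ie;
        struct my_card *card;
};

static void my_reenable_input(struct my_port *port)
{
        struct my_card *card = port->card;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        /* ... drain the port's input FIFO ... */
        port->pending = 0;
        spin_unlock_irqrestore(&port->lock, flags);

        /* take the card lock only after the port lock has been released */
        spin_lock_irqsave(&card->lock, flags);
        card->control_register |= port->ie;
        /* write control_register back to the hardware here */
        spin_unlock_irqrestore(&card->lock, flags);
}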
index 7fbfa051f6e1fb013b8a739cfc2ebe074e0c586b..eda955b158343c527548ea77bff97edc1e530425 100644 (file)
@@ -848,9 +848,10 @@ int atmel_ssc_set_audio(int ssc_id)
        if (IS_ERR(ssc))
                pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
                        PTR_ERR(ssc));
-       else
+       else {
                ssc_pdev->dev.parent = &(ssc->pdev->dev);
-       ssc_free(ssc);
+               ssc_free(ssc);
+       }
 
        ret = platform_device_add(ssc_pdev);
        if (ret < 0)
index ea4951cf5526d5871af7441430d583d81fd3749c..f79d1655e035d845fc0d8118366df2cd2f397989 100644 (file)
@@ -75,7 +75,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
                .cpu_dai_name = "bfin-tdm.0",
                .codec_dai_name = "ad1836-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad1836.0",
+               .codec_name = "spi0.4",
                .ops = &bf5xx_ad1836_ops,
        },
        {
@@ -84,7 +84,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
                .cpu_dai_name = "bfin-tdm.1",
                .codec_dai_name = "ad1836-hifi",
                .platform_name = "bfin-tdm-pcm-audio",
-               .codec_name = "ad1836.0",
+               .codec_name = "spi0.4",
                .ops = &bf5xx_ad1836_ops,
        },
 };
index ab63d52e36e147d9759f81afa0fdf9fbb21741b9..754c496412bdd0ec1440b4c2b9175251d6158670 100644 (file)
@@ -145,22 +145,22 @@ static int ad1836_hw_params(struct snd_pcm_substream *substream,
        /* bit size */
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
-               word_len = 3;
+               word_len = AD1836_WORD_LEN_16;
                break;
        case SNDRV_PCM_FORMAT_S20_3LE:
-               word_len = 1;
+               word_len = AD1836_WORD_LEN_20;
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
        case SNDRV_PCM_FORMAT_S32_LE:
-               word_len = 0;
+               word_len = AD1836_WORD_LEN_24;
                break;
        }
 
-       snd_soc_update_bits(codec, AD1836_DAC_CTRL1,
-               AD1836_DAC_WORD_LEN_MASK, word_len);
+       snd_soc_update_bits(codec, AD1836_DAC_CTRL1, AD1836_DAC_WORD_LEN_MASK,
+               word_len << AD1836_DAC_WORD_LEN_OFFSET);
 
-       snd_soc_update_bits(codec, AD1836_ADC_CTRL2,
-               AD1836_ADC_WORD_LEN_MASK, word_len);
+       snd_soc_update_bits(codec, AD1836_ADC_CTRL2, AD1836_ADC_WORD_LEN_MASK,
+               word_len << AD1836_ADC_WORD_OFFSET);
 
        return 0;
 }
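
The word-length fix above works because snd_soc_update_bits() rewrites only the bits selected by the mask, so the value has to be shifted into the same bit positions first. A rough model of its read-modify-write behaviour (a sketch, not the actual ASoC implementation):

#include <sound/soc.h>

static int update_bits_model(struct snd_soc_codec *codec, unsigned int reg,
                             unsigned int mask, unsigned int value)
{
        unsigned int old, new;
        int ret;

        old = snd_soc_read(codec, reg);
        new = (old & ~mask) | (value & mask);   /* only masked bits change */
        if (old == new)
                return 0;                       /* no register write needed */

        ret = snd_soc_write(codec, reg, new);
        if (ret < 0)
                return ret;
        return 1;                               /* register was changed */
}

With AD1836_DAC_WORD_LEN_MASK equal to 0x18 (bits 4:3), a word-length code of 0x2 therefore has to be passed as 0x2 << 3 = 0x10; an unshifted 0x2 would be masked away entirely, which is what the old code did.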
index 845596717fdf522d2de2d83d37e123825a36cade..9d6a3f8f8aafa29ada5b25408d0470efbd738186 100644 (file)
@@ -25,6 +25,7 @@
 #define AD1836_DAC_SERFMT_PCK256       (0x4 << 5)
 #define AD1836_DAC_SERFMT_PCK128       (0x5 << 5)
 #define AD1836_DAC_WORD_LEN_MASK       0x18
+#define AD1836_DAC_WORD_LEN_OFFSET     3
 
 #define AD1836_DAC_CTRL2               1
 #define AD1836_DACL1_MUTE              0
@@ -51,6 +52,7 @@
 #define AD1836_ADCL2_MUTE              2
 #define AD1836_ADCR2_MUTE              3
 #define AD1836_ADC_WORD_LEN_MASK       0x30
+#define AD1836_ADC_WORD_OFFSET         5
 #define AD1836_ADC_SERFMT_MASK        (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256       (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128       (0x5 << 6)
@@ -60,4 +62,8 @@
 
 #define AD1836_NUM_REGS                16
 
+#define AD1836_WORD_LEN_24 0x0
+#define AD1836_WORD_LEN_20 0x1
+#define AD1836_WORD_LEN_16 0x2
+
 #endif
index f8c663dcff027d9d10083a41d0a6f5cd3b082e20..d68ea532cc7f59a52e46be1a89ccf4d0c35ad518 100644 (file)
@@ -262,14 +262,14 @@ static int v253_hangup(struct tty_struct *tty)
 }
 
 /* Line discipline .receive_buf() */
-static unsigned int v253_receive(struct tty_struct *tty,
-                                const unsigned char *cp, char *fp, int count)
+static void v253_receive(struct tty_struct *tty,
+                               const unsigned char *cp, char *fp, int count)
 {
        struct snd_soc_codec *codec = tty->disc_data;
        struct cx20442_priv *cx20442;
 
        if (!codec)
-               return count;
+               return;
 
        cx20442 = snd_soc_codec_get_drvdata(codec);
 
@@ -281,8 +281,6 @@ static unsigned int v253_receive(struct tty_struct *tty,
                codec->hw_write = (hw_write_t)tty->ops->write;
                codec->card->pop_time = 1;
        }
-
-       return count;
 }
 
 /* Line discipline .write_wakeup() */
index 6785688f88069ad9b56fed4a28933d34c8751ebc..9a5e67c5a6bdf9f2cc7fc5a23b49a8c76a0f462e 100644 (file)
@@ -680,20 +680,25 @@ static struct snd_soc_dai_ops wm8804_dai_ops = {
 #define WM8804_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
                        SNDRV_PCM_FMTBIT_S24_LE)
 
+#define WM8804_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+                     SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
+                     SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+                     SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
 static struct snd_soc_dai_driver wm8804_dai = {
        .name = "wm8804-spdif",
        .playback = {
                .stream_name = "Playback",
                .channels_min = 2,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000,
+               .rates = WM8804_RATES,
                .formats = WM8804_FORMATS,
        },
        .capture = {
                .stream_name = "Capture",
                .channels_min = 2,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_8000_192000,
+               .rates = WM8804_RATES,
                .formats = WM8804_FORMATS,
        },
        .ops = &wm8804_dai_ops,
index a0b1a7278284ee7419ce662d0ba913391556b02e..e2ab4fac28199130a69ce3421ffb722de75b3c97 100644 (file)
@@ -1839,7 +1839,7 @@ static int wm8915_set_sysclk(struct snd_soc_dai *dai,
        int old;
 
        /* Disable SYSCLK while we reconfigure */
-       old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1);
+       old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1) & WM8915_SYSCLK_ENA;
        snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
                            WM8915_SYSCLK_ENA, 0);
 
@@ -2038,6 +2038,7 @@ static int wm8915_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
                break;
        case WM8915_FLL_MCLK2:
                reg = 1;
+               break;
        case WM8915_FLL_DACLRCLK1:
                reg = 2;
                break;
index f90ae427242b4a26c87b9aff56fe23cc09303645..5e05eed96c381c9730b31d65ea1b786b459ec41e 100644 (file)
@@ -1999,12 +1999,12 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
                return 0;
 
        /* If the left PGA is enabled hit that VU bit... */
-       if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTL_PGA_ENA)
+       if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
                return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
                                     reg_cache[WM8962_HPOUTL_VOLUME]);
 
        /* ...otherwise the right.  The VU is stereo. */
-       if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTR_PGA_ENA)
+       if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
                return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
                                     reg_cache[WM8962_HPOUTR_VOLUME]);
 
index e55b298c14a06e249f54453829295c880c39998a..9e370d14ad88f2bce72ccd0ce1204eb01bd854fa 100644 (file)
@@ -215,23 +215,23 @@ static const struct snd_kcontrol_new analogue_snd_controls[] = {
 SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
               inpga_tlv),
 SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
               inpga_tlv),
 SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 
 SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
               inpga_tlv),
 SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
               inpga_tlv),
 SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
               inmix_sw_tlv),
index 15dac0f20cd8aa9a37221acc3367fa269142fc5a..6680c0b4d2038cf569940df97d7f363b6b317a74 100644 (file)
@@ -310,7 +310,7 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
         * should allocate a DMA buffer only for the streams that are valid.
         */
 
-       if (dai->driver->playback.channels_min) {
+       if (pcm->streams[0].substream) {
                ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
                        fsl_dma_hardware.buffer_bytes_max,
                        &pcm->streams[0].substream->dma_buffer);
@@ -320,13 +320,13 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
                }
        }
 
-       if (dai->driver->capture.channels_min) {
+       if (pcm->streams[1].substream) {
                ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
                        fsl_dma_hardware.buffer_bytes_max,
                        &pcm->streams[1].substream->dma_buffer);
                if (ret) {
-                       snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
                        dev_err(card->dev, "can't alloc capture dma buffer\n");
+                       snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
                        return ret;
                }
        }
@@ -449,7 +449,8 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
        dma_private->ld_buf_phys = ld_buf_phys;
        dma_private->dma_buf_phys = substream->dma_buffer.addr;
 
-       ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "DMA", dma_private);
+       ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio",
+                         dma_private);
        if (ret) {
                dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
                        dma_private->irq, ret);
index ffa09b3b2caaeedf98568331ec0825888829e5eb..992a732b5211bccb5385256eaf37ff887259ee0a 100644 (file)
@@ -191,7 +191,7 @@ static inline bool tx_active(struct i2s_dai *i2s)
        if (!i2s)
                return false;
 
-       active = readl(i2s->addr + I2SMOD);
+       active = readl(i2s->addr + I2SCON);
 
        if (is_secondary(i2s))
                active &= CON_TXSDMA_ACTIVE;
@@ -223,7 +223,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
        if (!i2s)
                return false;
 
-       active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE;
+       active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
 
        return active ? true : false;
 }
index 06b7b81a16016f714b9bb2597dc705c0bab20ffb..c005ceb70c9d1dfc624ffa11a13eec427e2215b9 100644 (file)
@@ -466,6 +466,9 @@ static bool snd_soc_set_cache_val(void *base, unsigned int idx,
 static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
                unsigned int word_size)
 {
+       if (!base)
+               return -1;
+
        switch (word_size) {
        case 1: {
                const u8 *cache = base;
index 999bb08cdfb143708312cd0da0e358caa077d766..32ab7fc4579ac18ee7155aa4891e709c76173bf1 100644 (file)
@@ -325,6 +325,7 @@ static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 }
 
 static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
+       struct snd_soc_dapm_widget *kcontrolw,
        const struct snd_kcontrol_new *kcontrol_new,
        struct snd_kcontrol **kcontrol)
 {
@@ -334,6 +335,8 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
        *kcontrol = NULL;
 
        list_for_each_entry(w, &dapm->card->widgets, list) {
+               if (w == kcontrolw || w->dapm != kcontrolw->dapm)
+                       continue;
                for (i = 0; i < w->num_kcontrols; i++) {
                        if (&w->kcontrol_news[i] == kcontrol_new) {
                                if (w->kcontrols)
@@ -347,9 +350,9 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
-       struct snd_soc_dapm_widget *w)
+static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
 {
+       struct snd_soc_dapm_context *dapm = w->dapm;
        int i, ret = 0;
        size_t name_len, prefix_len;
        struct snd_soc_dapm_path *path;
@@ -447,9 +450,9 @@ static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
-       struct snd_soc_dapm_widget *w)
+static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 {
+       struct snd_soc_dapm_context *dapm = w->dapm;
        struct snd_soc_dapm_path *path = NULL;
        struct snd_kcontrol *kcontrol;
        struct snd_card *card = dapm->card->snd_card;
@@ -468,7 +471,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
                return -EINVAL;
        }
 
-       shared = dapm_is_shared_kcontrol(dapm, &w->kcontrol_news[0],
+       shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[0],
                                         &kcontrol);
        if (kcontrol) {
                wlist = kcontrol->private_data;
@@ -532,8 +535,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
-       struct snd_soc_dapm_widget *w)
+static int dapm_new_pga(struct snd_soc_dapm_widget *w)
 {
        if (w->num_kcontrols)
                dev_err(w->dapm->dev,
@@ -1823,13 +1825,13 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                case snd_soc_dapm_mixer:
                case snd_soc_dapm_mixer_named_ctl:
                        w->power_check = dapm_generic_check_power;
-                       dapm_new_mixer(dapm, w);
+                       dapm_new_mixer(w);
                        break;
                case snd_soc_dapm_mux:
                case snd_soc_dapm_virt_mux:
                case snd_soc_dapm_value_mux:
                        w->power_check = dapm_generic_check_power;
-                       dapm_new_mux(dapm, w);
+                       dapm_new_mux(w);
                        break;
                case snd_soc_dapm_adc:
                case snd_soc_dapm_aif_out:
@@ -1842,7 +1844,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                case snd_soc_dapm_pga:
                case snd_soc_dapm_out_drv:
                        w->power_check = dapm_generic_check_power;
-                       dapm_new_pga(dapm, w);
+                       dapm_new_pga(w);
                        break;
                case snd_soc_dapm_input:
                case snd_soc_dapm_output:
index d47beffedb0ff5b4be36bab35a1014e5bd5aa22f..1e3ae3327dd3a65431b4a517ab5b340dac2ee6f7 100644 (file)
@@ -227,6 +227,7 @@ static int usb6fire_fw_ezusb_upload(
        ret = usb6fire_fw_ihex_init(fw, rec);
        if (ret < 0) {
                kfree(rec);
+               release_firmware(fw);
                snd_printk(KERN_ERR PREFIX "error validating ezusb "
                                "firmware %s.\n", fwname);
                return ret;
@@ -269,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
        data = 0x00; /* resume ezusb cpu */
        ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
        if (ret < 0) {
-               release_firmware(fw);
                snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
                                "firmware %s: end message.\n", fwname);
                return ret;
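
The firmware hunks above adjust where release_firmware() is called so that the requested blob is freed exactly once on every path. A sketch of the usual request/release balance, with hypothetical helpers (parse_blob() and upload_blob() stand in for the driver-specific steps):

#include <linux/device.h>
#include <linux/firmware.h>

int parse_blob(const u8 *data, size_t size);                       /* hypothetical */
int upload_blob(struct device *dev, const u8 *data, size_t size);  /* hypothetical */

static int load_blob(struct device *dev, const char *name)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, name, dev);
        if (ret < 0)
                return ret;             /* nothing to release yet */

        ret = parse_blob(fw->data, fw->size);
        if (ret < 0)
                goto out;               /* early error: still release the blob */

        ret = upload_blob(dev, fw->data, fw->size);
out:
        release_firmware(fw);           /* every later path releases exactly once */
        return ret;
}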
index b137b25865cc986cc8f6bc8d21ca482e34b987cd..d144cdb2f15909acefec2f0c45ea0cd1ff523030 100644 (file)
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
        alsa_rt->hw = pcm_hw;
 
        if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = OUT_N_CHANNELS;
                sub = &rt->playback;
        } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = IN_N_CHANNELS;
                sub = &rt->capture;
index 2e969cbb393b004af02d37419fccb080ce99e9d6..090e1930dfdcc6b37c68b7731e0862cb703a7689 100644 (file)
@@ -403,7 +403,7 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
 static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
 {
        int err, reg;
-       int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
+       int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
 
        for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
                err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
index 26d4d3fd6deb2e1285ca44c9a710e7cadffd374d..ad73300f7bac6d1b11e4db0f1e121e0e26b0fb0c 100755 (executable)
@@ -23,12 +23,7 @@ if test -d ../../.git -o -f ../../.git &&
 then
        VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-       eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
-       eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
-       eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
-       eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
-
-       VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+       VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
 fi
 
 VN=$(expr "$VN" : v*'\(.*\)')
index 1e88485c16a04b755e68bb14510c3fd5cf368769..0a7ed5b5e281c88b321de87ced66a3d29ebb003d 100644 (file)
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
        { "TASKLET_SOFTIRQ", 6 },
        { "SCHED_SOFTIRQ", 7 },
        { "HRTIMER_SOFTIRQ", 8 },
+       { "RCU_SOFTIRQ", 9 },
 
        { "HRTIMER_NORESTART", 0 },
        { "HRTIMER_RESTART", 1 },
index 1fd29b2daa9204fff6e345c8871799dcb0e04e5e..cef28e6632b98cd4e82426f45805f676c2fc0543 100755 (executable)
@@ -788,7 +788,7 @@ sub wait_for_input
 
 sub reboot_to {
     if ($reboot_type eq "grub") {
-       run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+       run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'";
        return;
     }
 
@@ -1480,7 +1480,7 @@ sub process_config_ignore {
        or dodie "Failed to read $config";
 
     while (<IN>) {
-       if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+       if (/^((CONFIG\S*)=.*)/) {
            $config_ignore{$2} = $1;
        }
     }
@@ -1638,7 +1638,7 @@ sub run_config_bisect {
        if (!$found) {
            # try the other half
            doprint "Top half produced no set configs, trying bottom half\n";
-           @tophalf = @start_list[$half .. $#start_list];
+           @tophalf = @start_list[$half + 1 .. $#start_list];
            create_config @tophalf;
            read_current_config \%current_config;
            foreach my $config (@tophalf) {
@@ -1690,7 +1690,7 @@ sub run_config_bisect {
        # remove half the configs we are looking at and see if
        # they are good.
        $half = int($#start_list / 2);
-    } while ($half > 0);
+    } while ($#start_list > 0);
 
     # we found a single config, try it again unless we are running manually
 
index 22cdb960660a61cd6c142278b08ed175b3d9110b..96ebc0679415baeff88d7764b1c0e956820adad2 100644 (file)
@@ -467,12 +467,8 @@ static struct kvm *kvm_create_vm(void)
                if (!kvm->buses[i])
                        goto out_err;
        }
-       spin_lock_init(&kvm->mmu_lock);
-
-       r = kvm_init_mmu_notifier(kvm);
-       if (r)
-               goto out_err;
 
+       spin_lock_init(&kvm->mmu_lock);
        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        kvm_eventfd_init(kvm);
@@ -480,6 +476,11 @@ static struct kvm *kvm_create_vm(void)
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
+
+       r = kvm_init_mmu_notifier(kvm);
+       if (r)
+               goto out_err;
+
        raw_spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        raw_spin_unlock(&kvm_lock);
@@ -651,7 +652,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
        /* We can read the guest memory with __xxx_user() later on. */
        if (user_alloc &&
            ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-            !access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size)))
+            !access_ok(VERIFY_WRITE,
+                       (void __user *)(unsigned long)mem->userspace_addr,
+                       mem->memory_size)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
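
The access_ok() change matters because mem->userspace_addr arrives from userspace as a plain 64-bit integer; it has to be converted to a __user pointer (via unsigned long) before the range check. A small sketch of the same check, using a hypothetical helper name and the three-argument access_ok() of this kernel generation:

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static bool user_range_ok(u64 uaddr, u64 size)
{
        if (uaddr & (PAGE_SIZE - 1))
                return false;   /* must start on a page boundary */

        return access_ok(VERIFY_WRITE,
                         (void __user *)(unsigned long)uaddr, size);
}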